Diffstat (limited to 'llvm')
-rw-r--r--llvm/CMakeLists.txt2
-rw-r--r--llvm/cmake/modules/LLVMProcessSources.cmake20
-rw-r--r--llvm/docs/CommandGuide/lit.rst5
-rw-r--r--llvm/docs/CommandGuide/llvm-ir2vec.rst82
-rw-r--r--llvm/docs/HowToCrossCompileBuiltinsOnArm.rst227
-rw-r--r--llvm/docs/LangRef.rst2
-rw-r--r--llvm/docs/ReleaseNotes.md3
-rw-r--r--llvm/docs/TableGen/BackGuide.rst8
-rw-r--r--llvm/include/llvm/ADT/Any.h1
-rw-r--r--llvm/include/llvm/Analysis/DXILResource.h82
-rw-r--r--llvm/include/llvm/Analysis/IR2Vec.h36
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h4
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfo.h4
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfoImpl.h2
-rw-r--r--llvm/include/llvm/Analysis/VectorUtils.h3
-rw-r--r--llvm/include/llvm/BinaryFormat/COFF.h19
-rw-r--r--llvm/include/llvm/BinaryFormat/ELF.h3
-rw-r--r--llvm/include/llvm/BinaryFormat/SFrame.h31
-rw-r--r--llvm/include/llvm/BinaryFormat/SFrameConstants.def41
-rw-r--r--llvm/include/llvm/Bitstream/BitstreamWriter.h5
-rw-r--r--llvm/include/llvm/CodeGen/AsmPrinter.h39
-rw-r--r--llvm/include/llvm/CodeGen/GCMetadata.h28
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h6
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h42
-rw-r--r--llvm/include/llvm/CodeGen/MachineBasicBlock.h9
-rw-r--r--llvm/include/llvm/CodeGen/MachineFunction.h10
-rw-r--r--llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h2
-rw-r--r--llvm/include/llvm/CodeGen/MachineInstr.h4
-rw-r--r--llvm/include/llvm/CodeGen/MachineInstrBuilder.h92
-rw-r--r--llvm/include/llvm/CodeGen/MachineInstrBundle.h4
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGNodes.h2
-rw-r--r--llvm/include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h19
-rw-r--r--llvm/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h9
-rw-r--r--llvm/include/llvm/Frontend/HLSL/HLSLBinding.h162
-rw-r--r--llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h115
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/ClauseT.h5
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMP.td2
-rw-r--r--llvm/include/llvm/IR/GCStrategy.h5
-rw-r--r--llvm/include/llvm/IR/IntrinsicInst.h17
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td111
-rw-r--r--llvm/include/llvm/IR/NVVMIntrinsicUtils.h2
-rw-r--r--llvm/include/llvm/IR/RuntimeLibcalls.h2
-rw-r--r--llvm/include/llvm/LTO/LTO.h26
-rw-r--r--llvm/include/llvm/MC/DXContainerRootSignature.h5
-rw-r--r--llvm/include/llvm/MC/MCAssembler.h2
-rw-r--r--llvm/include/llvm/MC/MCObjectFileInfo.h5
-rw-r--r--llvm/include/llvm/MC/MCObjectStreamer.h11
-rw-r--r--llvm/include/llvm/MC/MCSection.h44
-rw-r--r--llvm/include/llvm/Object/SFrameParser.h27
-rw-r--r--llvm/include/llvm/ObjectYAML/ELFYAML.h2
-rw-r--r--llvm/include/llvm/Passes/CodeGenPassBuilder.h16
-rw-r--r--llvm/include/llvm/ProfileData/MemProfData.inc40
-rw-r--r--llvm/include/llvm/Support/AArch64AttributeParser.h2
-rw-r--r--llvm/include/llvm/Support/VirtualFileSystem.h2
-rw-r--r--llvm/include/llvm/Support/Windows/WindowsSupport.h4
-rw-r--r--llvm/include/llvm/Target/CGPassBuilderOption.h2
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td1
-rw-r--r--llvm/include/llvm/TextAPI/SymbolSet.h2
-rw-r--r--llvm/include/llvm/Transforms/Scalar/Reassociate.h3
-rw-r--r--llvm/include/llvm/Transforms/Utils/Mem2Reg.h3
-rw-r--r--llvm/include/llvm/Transforms/Utils/ProfileVerify.h5
-rw-r--r--llvm/lib/Analysis/CMakeLists.txt1
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp13
-rw-r--r--llvm/lib/Analysis/DXILResource.cpp136
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp14
-rw-r--r--llvm/lib/Analysis/TargetTransformInfo.cpp4
-rw-r--r--llvm/lib/Analysis/UniformityAnalysis.cpp1
-rw-r--r--llvm/lib/Analysis/VectorUtils.cpp6
-rw-r--r--llvm/lib/BinaryFormat/SFrame.cpp33
-rw-r--r--llvm/lib/CGData/StableFunctionMapRecord.cpp16
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp108
-rw-r--r--llvm/lib/CodeGen/CodeGenPrepare.cpp23
-rw-r--r--llvm/lib/CodeGen/GlobalISel/CallLowering.cpp6
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp4
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp18
-rw-r--r--llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp22
-rw-r--r--llvm/lib/CodeGen/MIRParser/MILexer.cpp1
-rw-r--r--llvm/lib/CodeGen/MIRParser/MILexer.h1
-rw-r--r--llvm/lib/CodeGen/MIRParser/MIParser.cpp5
-rw-r--r--llvm/lib/CodeGen/MIRPrinter.cpp2
-rw-r--r--llvm/lib/CodeGen/MachineFunction.cpp20
-rw-r--r--llvm/lib/CodeGen/MachineInstr.cpp6
-rw-r--r--llvm/lib/CodeGen/ModuloSchedule.cpp2
-rw-r--r--llvm/lib/CodeGen/RegisterCoalescer.cpp179
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp123
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp3
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp83
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp2
-rw-r--r--llvm/lib/CodeGen/TailDuplicator.cpp18
-rw-r--r--llvm/lib/CodeGen/TargetInstrInfo.cpp2
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp2
-rw-r--r--llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp14
-rw-r--r--llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp119
-rw-r--r--llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h3
-rw-r--r--llvm/lib/Frontend/HLSL/CMakeLists.txt1
-rw-r--r--llvm/lib/Frontend/HLSL/HLSLBinding.cpp142
-rw-r--r--llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp464
-rw-r--r--llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp7
-rw-r--r--llvm/lib/LTO/LTO.cpp118
-rw-r--r--llvm/lib/MC/MCObjectFileInfo.cpp20
-rw-r--r--llvm/lib/MC/MCObjectStreamer.cpp119
-rw-r--r--llvm/lib/MC/MCWin64EH.cpp3
-rw-r--r--llvm/lib/MC/MCWinCOFFStreamer.cpp5
-rw-r--r--llvm/lib/ObjCopy/COFF/COFFReader.cpp2
-rw-r--r--llvm/lib/Object/ELFObjectFile.cpp9
-rw-r--r--llvm/lib/Object/SFrameParser.cpp72
-rw-r--r--llvm/lib/ObjectYAML/ELFEmitter.cpp6
-rw-r--r--llvm/lib/ObjectYAML/ELFYAML.cpp2
-rw-r--r--llvm/lib/ProfileData/MemProfReader.cpp71
-rw-r--r--llvm/lib/Support/BLAKE3/CMakeLists.txt3
-rw-r--r--llvm/lib/Support/FileCollector.cpp3
-rw-r--r--llvm/lib/Support/Unix/Path.inc2
-rw-r--r--llvm/lib/Support/VirtualFileSystem.cpp11
-rw-r--r--llvm/lib/Support/Windows/Threading.inc62
-rw-r--r--llvm/lib/Target/AArch64/AArch64Combine.td10
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp32
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp113
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td9
-rw-r--r--llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp57
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp13
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp4
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp47
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp10
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp90
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPU.td13
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp23
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGISel.td3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp29
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp6
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp36
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructions.td1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp65
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp41
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp45
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td12
-rw-r--r--llvm/lib/Target/AMDGPU/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/AMDGPU/DSInstructions.td3
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td61
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h18
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h2
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/SIDefines.h1
-rw-r--r--llvm/lib/Target/AMDGPU/SIFoldOperands.cpp1
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp69
-rw-r--r--llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp12
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.td14
-rw-r--r--llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.td2
-rw-r--r--llvm/lib/Target/AMDGPU/SOPInstructions.td7
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h1
-rw-r--r--llvm/lib/Target/AMDGPU/VOP2Instructions.td4
-rw-r--r--llvm/lib/Target/AMDGPU/VOP3Instructions.td342
-rw-r--r--llvm/lib/Target/AMDGPU/VOPInstructions.td124
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp5
-rw-r--r--llvm/lib/Target/DirectX/DXILRootSignature.cpp26
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp33
-rw-r--r--llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp6
-rw-r--r--llvm/lib/Target/Mips/MipsISelLowering.cpp6
-rw-r--r--llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp10
-rw-r--r--llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h1
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp5
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp89
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h3
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp53
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrFormats.td10
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp26
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.td694
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXIntrinsics.td624
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td8
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrP10.td116
-rw-r--r--llvm/lib/Target/RISCV/RISCVCallingConv.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp28
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp146
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h1
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td15
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td52
-rw-r--r--llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp52
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp15
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp65
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h4
-rw-r--r--llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp17
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp16
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp245
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZISelLowering.cpp2
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssembly.td9
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td6
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h2
-rw-r--r--llvm/lib/Target/X86/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/X86/X86FastISel.cpp8
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp22
-rw-r--r--llvm/lib/Target/X86/X86ISelLoweringCall.cpp4
-rw-r--r--llvm/lib/TargetParser/Host.cpp13
-rw-r--r--llvm/lib/TargetParser/TargetParser.cpp1
-rw-r--r--llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp102
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroSplit.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/LowerTypeTests.cpp127
-rw-r--r--llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp21
-rw-r--r--llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp31
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp154
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp8
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp16
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp51
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp30
-rw-r--r--llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp80
-rw-r--r--llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp9
-rw-r--r--llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp7
-rw-r--r--llvm/lib/Transforms/Scalar/NewGVN.cpp20
-rw-r--r--llvm/lib/Transforms/Utils/SCCPSolver.cpp143
-rw-r--r--llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/SimplifyCFG.cpp71
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h5
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp65
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp4
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp160
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.h12
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp18
-rw-r--r--llvm/lib/Transforms/Vectorize/VectorCombine.cpp7
-rw-r--r--llvm/test/Analysis/CostModel/ARM/arith-overflow.ll1023
-rw-r--r--llvm/test/Analysis/CostModel/ARM/arith-ssat.ll447
-rw-r--r--llvm/test/Analysis/CostModel/ARM/arith-usat.ll447
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/fround.ll626
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll8
-rw-r--r--llvm/test/Analysis/ScalarEvolution/zext-add.ll52
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll82
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll64
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll48
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll16
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir32
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir80
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir48
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir22
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir6
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir16
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir10
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir48
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir32
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir24
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir10
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir12
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir20
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir5
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll127
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll110
-rw-r--r--llvm/test/CodeGen/AArch64/abds-neg.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/abds.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/abdu-neg.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/abdu.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/adc.ll49
-rw-r--r--llvm/test/CodeGen/AArch64/add-extract.ll145
-rw-r--r--llvm/test/CodeGen/AArch64/addcarry-crash.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/addsub.ll643
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-this-return.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vabs.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vmul.ll1358
-rw-r--r--llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/AArch64/cmp-to-cmn.ll437
-rw-r--r--llvm/test/CodeGen/AArch64/combine-and-like.ll1
-rw-r--r--llvm/test/CodeGen/AArch64/constant-pool-partition.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll77
-rw-r--r--llvm/test/CodeGen/AArch64/logical_shifted_reg.ll65
-rw-r--r--llvm/test/CodeGen/AArch64/midpoint-int.ll56
-rw-r--r--llvm/test/CodeGen/AArch64/neg-abs.ll163
-rw-r--r--llvm/test/CodeGen/AArch64/neg-selects.ll122
-rw-r--r--llvm/test/CodeGen/AArch64/neon-dot-product.ll206
-rw-r--r--llvm/test/CodeGen/AArch64/neon-dotreduce.ll4360
-rw-r--r--llvm/test/CodeGen/AArch64/neon-extmul.ll108
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-and-tst.ll275
-rw-r--r--llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll10
-rw-r--r--llvm/test/CodeGen/AArch64/reassocmls.ll211
-rw-r--r--llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir23
-rw-r--r--llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir161
-rw-r--r--llvm/test/CodeGen/AArch64/selectopt-const.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll1
-rw-r--r--llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll1
-rw-r--r--llvm/test/CodeGen/AArch64/store-float-conversion.ll131
-rw-r--r--llvm/test/CodeGen/AArch64/tbl-loops.ll41
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-add.ll337
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/bug-legalization-artifact-combiner-dead-def.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll69
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll829
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll790
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll32
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll100
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll162
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-invariant.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir14
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir136
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir1254
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir258
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir6
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir1542
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir2724
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir2786
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir3950
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir4990
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir27
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir2120
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir60
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir27
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/minmaxabs-i64.ll192
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll119
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll64
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir52
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir6
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uniform-load-noclobber.mir18
-rw-r--r--llvm/test/CodeGen/AMDGPU/add-max.ll62
-rw-r--r--llvm/test/CodeGen/AMDGPU/bitop3.ll313
-rw-r--r--llvm/test/CodeGen/AMDGPU/branch-relaxation-inst-size-gfx11.ll51
-rw-r--r--llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll28
-rw-r--r--llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/fdiv.f16.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll304
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch.ll54
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll799
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll1783
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptrunc.ll633
-rw-r--r--llvm/test/CodeGen/AMDGPU/freeze.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir133
-rw-r--r--llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll1201
-rw-r--r--llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll543
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.bitop3.ll229
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll184
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll539
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll64
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll164
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.async.to.lds.ll189
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.store.async.from.lds.ll189
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.xf32.gfx942.ll116
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp10.ll72
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.log10.ll115
-rw-r--r--llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll107
-rw-r--r--llvm/test/CodeGen/AMDGPU/max.ll320
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory_clause.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll123
-rw-r--r--llvm/test/CodeGen/AMDGPU/min.ll531
-rw-r--r--llvm/test/CodeGen/AMDGPU/mul.ll25
-rw-r--r--llvm/test/CodeGen/AMDGPU/rcp-pattern.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll834
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f32.ll90
-rw-r--r--llvm/test/CodeGen/AMDGPU/rsq.f64.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/saddsat.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/uaddsat.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll29
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll584
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll584
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll553
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll470
-rw-r--r--llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll531
-rw-r--r--llvm/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll2
-rw-r--r--llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir4
-rw-r--r--llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/ARM/fcopysign.ll1
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll2
-rw-r--r--llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll12
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll140
-rw-r--r--llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll98
-rw-r--r--llvm/test/CodeGen/M68k/GlobalISel/irtranslator-call.ll18
-rw-r--r--llvm/test/CodeGen/M68k/GlobalISel/legalize-load-store.mir6
-rw-r--r--llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll91
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll12
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll2
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/irtranslator/var_arg.ll2
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir21
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir66
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir862
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir862
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir44
-rw-r--r--llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/NVPTX/bug26185-2.ll9
-rw-r--r--llvm/test/CodeGen/NVPTX/combine-wide.ll1339
-rw-r--r--llvm/test/CodeGen/NVPTX/fold-movs.ll38
-rw-r--r--llvm/test/CodeGen/NVPTX/i8x4-instructions.ll168
-rw-r--r--llvm/test/CodeGen/NVPTX/ld-param-sink.ll47
-rw-r--r--llvm/test/CodeGen/NVPTX/local-stack-frame.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/vector-loads.ll11
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/build-vector-tests.ll48
-rw-r--r--llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll6
-rw-r--r--llvm/test/CodeGen/PowerPC/combine-fneg.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/fp-strict-round.ll6
-rw-r--r--llvm/test/CodeGen/PowerPC/frem.ll3
-rw-r--r--llvm/test/CodeGen/PowerPC/froundeven-legalization.ll8
-rw-r--r--llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/ldexp.ll2
-rw-r--r--llvm/test/CodeGen/PowerPC/llvm.modf.ll1
-rw-r--r--llvm/test/CodeGen/PowerPC/vec_insert_elt.ll4
-rw-r--r--llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll176
-rw-r--r--llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll82
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv32.mir12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir30
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir66
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir14
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir16
-rw-r--r--llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll33
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll449
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/memset-inline.ll122
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll3
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/memset-inline.ll126
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/pr141907.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir327
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vl-opt.mir82
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/xmips-cbop.ll55
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll42
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll408
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll104
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll19
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-mul-07.ll114
-rw-r--r--llvm/test/CodeGen/WebAssembly/narrow-simd-mul.ll95
-rw-r--r--llvm/test/CodeGen/WebAssembly/target-features-cpus.ll4
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir38
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir3
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll4
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll24
-rw-r--r--llvm/test/CodeGen/X86/apx/cf.ll39
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-assembly.ll43
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section-tailcall.ll34
-rw-r--r--llvm/test/CodeGen/X86/call-graph-section.ll38
-rw-r--r--llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll32
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll19
-rw-r--r--llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll20
-rw-r--r--llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll185
-rw-r--r--llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir6
-rw-r--r--llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir44
-rw-r--r--llvm/test/CodeGen/X86/combine-add-ssat.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-add-usat.ll5
-rw-r--r--llvm/test/CodeGen/X86/combine-sub-ssat.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-sub-usat.ll6
-rw-r--r--llvm/test/CodeGen/X86/constant-pool-partition.ll8
-rw-r--r--llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir (renamed from llvm/test/CodeGen/X86/tail-dup-computed-goto.mir)44
-rw-r--r--llvm/test/CodeGen/X86/load-combine.ll6
-rw-r--r--llvm/test/CodeGen/X86/pr33960.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr76416.ll79
-rw-r--r--llvm/test/CodeGen/X86/stack-protector.ll1
-rw-r--r--llvm/test/CodeGen/X86/subreg-fail.mir4
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir451
-rw-r--r--llvm/test/CodeGen/X86/win32-ssp.ll2
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll16
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/globals.ll24
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll186
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s15
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_ds.s78
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s300
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s3029
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s706
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s706
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s452
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s452
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s340
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s340
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s152
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s20
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt33
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt90
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt75
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt761
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt379
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt291
-rw-r--r--llvm/test/MC/ELF/many-instructions.s10
-rw-r--r--llvm/test/MC/RISCV/rvv/fadd.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fcompare.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fdiv.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fmacc.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fminmax.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fmul.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fmv.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fothers.s11
-rw-r--r--llvm/test/MC/RISCV/rvv/freduction.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/fsub.s19
-rw-r--r--llvm/test/MC/RISCV/rvv/zvfbfwma.s26
-rw-r--r--llvm/test/MC/RISCV/xqcisim-valid.s33
-rw-r--r--llvm/test/MC/RISCV/xqcisync-valid.s7
-rw-r--r--llvm/test/MC/X86/verify-callgraph-section.s58
-rw-r--r--llvm/test/TableGen/get-named-operand-idx.td12
-rw-r--r--llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll14
-rw-r--r--llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll222
-rw-r--r--llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll2
-rw-r--r--llvm/test/Transforms/GVN/PRE/load-metadata.ll47
-rw-r--r--llvm/test/Transforms/GVN/PRE/load-pre-across-backedge.ll6
-rw-r--r--llvm/test/Transforms/GVN/PRE/load-pre-nonlocal.ll6
-rw-r--r--llvm/test/Transforms/GVN/PRE/lpre-call-wrap.ll80
-rw-r--r--llvm/test/Transforms/GVN/PRE/rle-addrspace-cast.ll24
-rw-r--r--llvm/test/Transforms/GVN/PRE/rle-semidominated.ll55
-rw-r--r--llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll55
-rw-r--r--llvm/test/Transforms/IndVarSimplify/zext-nuw.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll14
-rw-r--r--llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/gep-vector.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/gepphigep.ll126
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll106
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-custom-dl.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-gep.ll35
-rw-r--r--llvm/test/Transforms/InstCombine/indexed-gep-compares.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/known-phi-recurse.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/load-cmp.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/loadstore-alignment.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/or-packed-int-vecs.ll926
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll27
-rw-r--r--llvm/test/Transforms/InstCombine/pr39908.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/pr58901.ll3
-rw-r--r--llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll53
-rw-r--r--llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/strcmp-3.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/sub-gep.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/sub.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vector_gep1.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vectorgep-crash.ll (renamed from llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll)10
-rw-r--r--llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll5
-rw-r--r--llvm/test/Transforms/LoopUnroll/AArch64/vector.ll194
-rw-r--r--llvm/test/Transforms/LoopUnroll/RISCV/vector.ll603
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll280
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll62
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/f16.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll144
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll76
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll162
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll71
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll77
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll114
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll356
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll102
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll21
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll27
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll34
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll38
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop.ll54
-rw-r--r--llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll21
-rw-r--r--llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml8
-rw-r--r--llvm/test/Transforms/LowerTypeTests/export-alias.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll42
-rw-r--r--llvm/test/Transforms/SCCP/no-fold-fcmp-dynamic-denormal-mode-issue114947.ll7
-rw-r--r--llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll195
-rw-r--r--llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll95
-rw-r--r--llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll13
-rw-r--r--llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll889
-rw-r--r--llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test778
-rw-r--r--llvm/test/tools/llvm-ir2vec/entities.ll95
-rw-r--r--llvm/test/tools/llvm-ir2vec/error-handling.ll22
-rw-r--r--llvm/test/tools/llvm-ir2vec/triplets.ll51
-rw-r--r--llvm/test/tools/llvm-mc/disassembler-profile.test12
-rw-r--r--llvm/test/tools/llvm-objcopy/COFF/exe-bogus-assoc.test134
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexebin1611256 -> 1666656 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofrawbin75792 -> 20256 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic.memprofexebin1604896 -> 1660336 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic.memprofrawbin1152 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexebin0 -> 1604896 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofrawbin0 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/buildid.memprofexebin1604904 -> 1660336 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/buildid.memprofrawbin1152 -> 1152 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/inline.memprofexebin1605480 -> 1660912 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/inline.memprofrawbin976 -> 976 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/multi.memprofexebin1604912 -> 1660352 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/multi.memprofrawbin1920 -> 1920 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexebin1606576 -> 1661960 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofrawbin74952 -> 19608 bytes
-rwxr-xr-xllvm/test/tools/llvm-profdata/Inputs/pic.memprofexebin1607856 -> 1663288 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/Inputs/pic.memprofrawbin1152 -> 1152 bytes
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic-histogram.test4
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic.test4
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-basic_v4.test102
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-inline.test2
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-multi.test2
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-padding-histogram.test154
-rw-r--r--llvm/test/tools/llvm-profdata/memprof-pic.test4
-rw-r--r--llvm/test/tools/llvm-rc/windres-preproc.test4
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/Inputs/has-cet.exebin94720 -> 0 bytes
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/Inputs/has-exdllcharacteristics.exebin0 -> 5120 bytes
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/cetcompat.test16
-rw-r--r--llvm/test/tools/llvm-readobj/COFF/exdllcharacteristics.test22
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/sframe-fde.test237
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/sframe-header.test67
-rw-r--r--llvm/test/tools/obj2yaml/ELF/eflags.yaml31
-rw-r--r--llvm/test/tools/yaml2obj/file-header-flags.yaml25
-rw-r--r--llvm/tools/llc/llc.cpp4
-rw-r--r--llvm/tools/llvm-exegesis/lib/X86/Target.cpp16
-rw-r--r--llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp207
-rw-r--r--llvm/tools/llvm-mc/Disassembler.cpp46
-rw-r--r--llvm/tools/llvm-mc/Disassembler.h3
-rw-r--r--llvm/tools/llvm-mc/llvm-mc.cpp37
-rw-r--r--llvm/tools/llvm-rc/llvm-rc.cpp2
-rw-r--r--llvm/tools/llvm-readobj/COFFDumper.cpp11
-rw-r--r--llvm/tools/llvm-readobj/ELFDumper.cpp129
-rw-r--r--llvm/tools/obj2yaml/elf2yaml.cpp3
-rw-r--r--llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt2
-rw-r--r--llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt2
-rw-r--r--llvm/unittests/CodeGen/CMakeLists.txt1
-rw-r--r--llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp4
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp95
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGNodeConstructionTest.cpp317
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp94
-rw-r--r--llvm/unittests/CodeGen/SelectionDAGTestBase.h99
-rw-r--r--llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp18
-rw-r--r--llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp6
-rw-r--r--llvm/unittests/Frontend/CMakeLists.txt1
-rw-r--r--llvm/unittests/Frontend/HLSLBindingTest.cpp275
-rw-r--r--llvm/unittests/Support/VirtualFileSystemTest.cpp220
-rw-r--r--llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp171
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp36
-rw-r--r--llvm/utils/TableGen/InstrInfoEmitter.cpp51
-rw-r--r--llvm/utils/UpdateTestChecks/asm.py8
-rw-r--r--llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/lldb/source/Plugins/SymbolFile/DWARF/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn1
-rw-r--r--llvm/utils/lit/lit/Test.py3
-rw-r--r--llvm/utils/lit/lit/TestRunner.py2
-rw-r--r--llvm/utils/lit/lit/cl_arguments.py10
-rwxr-xr-xllvm/utils/lit/lit/main.py2
-rw-r--r--llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt2
-rw-r--r--llvm/utils/lit/tests/xfail-cl.py28
-rw-r--r--llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py304
-rwxr-xr-xllvm/utils/release/github-upload-release.py62
-rwxr-xr-xllvm/utils/update_mir_test_checks.py2
731 files changed, 57855 insertions, 26258 deletions
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index e874d76..4c70b98 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -149,7 +149,7 @@ endforeach()
# As we migrate runtimes to using the bootstrapping build, the set of default runtimes
# should grow as we remove those runtimes from LLVM_ENABLE_PROJECTS above.
set(LLVM_DEFAULT_RUNTIMES "libcxx;libcxxabi;libunwind")
-set(LLVM_SUPPORTED_RUNTIMES "libc;libunwind;libcxxabi;libcxx;compiler-rt;openmp;llvm-libgcc;offload;flang-rt;libclc")
+set(LLVM_SUPPORTED_RUNTIMES "libc;libunwind;libcxxabi;libcxx;compiler-rt;openmp;llvm-libgcc;offload;flang-rt;libclc;libsycl")
set(LLVM_ENABLE_RUNTIMES "" CACHE STRING
"Semicolon-separated list of runtimes to build, or \"all\" (${LLVM_DEFAULT_RUNTIMES}). Supported runtimes are ${LLVM_SUPPORTED_RUNTIMES}.")
if(LLVM_ENABLE_RUNTIMES STREQUAL "all")
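For context, runtimes from ``LLVM_SUPPORTED_RUNTIMES`` are opted into at configure
time through ``LLVM_ENABLE_RUNTIMES``. A minimal configure sketch (the generator,
projects, and chosen runtimes here are illustrative, not part of this change)::

  cmake -G Ninja -S llvm -B build \
    -DLLVM_ENABLE_PROJECTS="clang" \
    -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind"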
diff --git a/llvm/cmake/modules/LLVMProcessSources.cmake b/llvm/cmake/modules/LLVMProcessSources.cmake
index a7f9517..cf358a8 100644
--- a/llvm/cmake/modules/LLVMProcessSources.cmake
+++ b/llvm/cmake/modules/LLVMProcessSources.cmake
@@ -58,13 +58,19 @@ function(llvm_process_sources OUT_VAR)
set(sources ${ARG_UNPARSED_ARGUMENTS})
llvm_check_source_file_list(${sources})
- foreach(fn ${sources})
- get_filename_component(suf ${fn} EXT)
- if("${suf}" STREQUAL ".cpp" OR "${suf}" STREQUAL ".c")
- get_filename_component(short_name ${fn} NAME)
- set_source_files_properties(${fn} PROPERTIES COMPILE_DEFINITIONS "__SHORT_FILE__=\"${short_name}\"")
- endif()
- endforeach()
+ # Don't generate __SHORT_FILE__ on VS builds as it can prevent build parallelisation.
+ if(NOT CMAKE_GENERATOR MATCHES "Visual Studio")
+ foreach(fn ${sources})
+ get_filename_component(suf ${fn} EXT)
+ if("${suf}" STREQUAL ".cpp" OR "${suf}" STREQUAL ".c")
+ get_filename_component(short_name ${fn} NAME)
+ set_property(
+ SOURCE ${fn}
+ APPEND
+ PROPERTY COMPILE_DEFINITIONS __SHORT_FILE__="${short_name}")
+ endif()
+ endforeach()
+ endif()
# This adds .td and .h files to the Visual Studio solution:
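As background, the per-source ``COMPILE_DEFINITIONS`` property set above surfaces
as an ordinary ``-D`` definition on each compile command. A hypothetical compile
line showing the effect (compiler, flags, and file name are illustrative only)::

  clang++ -D__SHORT_FILE__=\"LLVMContext.cpp\" -c llvm/lib/IR/LLVMContext.cpp -o LLVMContext.cpp.o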
diff --git a/llvm/docs/CommandGuide/lit.rst b/llvm/docs/CommandGuide/lit.rst
index 938b7f9..eb90e95 100644
--- a/llvm/docs/CommandGuide/lit.rst
+++ b/llvm/docs/CommandGuide/lit.rst
@@ -356,6 +356,11 @@ The timing data is stored in the `test_exec_root` in a file named
primary purpose is to suppress an ``XPASS`` result without modifying a test
case that uses the ``XFAIL`` directive.
+.. option:: --exclude-xfail
+
+ ``XFAIL`` tests won't be run, unless they are listed in the ``--xfail-not``
+ (or ``LIT_XFAIL_NOT``) lists.
+
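A brief usage sketch of the new option (the test directory and test name are
placeholders)::

  llvm-lit --exclude-xfail test/
  LIT_XFAIL_NOT='some/test.ll' llvm-lit --exclude-xfail test/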
.. option:: --num-shards M
Divide the set of selected tests into ``M`` equal-sized subsets or
diff --git a/llvm/docs/CommandGuide/llvm-ir2vec.rst b/llvm/docs/CommandGuide/llvm-ir2vec.rst
index 13fe4996..2f00c9f 100644
--- a/llvm/docs/CommandGuide/llvm-ir2vec.rst
+++ b/llvm/docs/CommandGuide/llvm-ir2vec.rst
@@ -13,17 +13,21 @@ DESCRIPTION
:program:`llvm-ir2vec` is a standalone command-line tool for IR2Vec. It
generates IR2Vec embeddings for LLVM IR and supports triplet generation
-for vocabulary training. It provides two main operation modes:
+for vocabulary training. It provides three main operation modes:
-1. **Triplet Mode**: Generates triplets (opcode, type, operands) for vocabulary
+1. **Triplet Mode**: Generates numeric triplets in train2id format for vocabulary
training from LLVM IR.
-2. **Embedding Mode**: Generates IR2Vec embeddings using a trained vocabulary
+2. **Entity Mode**: Generates entity mapping files (entity2id.txt) for vocabulary
+ training.
+
+3. **Embedding Mode**: Generates IR2Vec embeddings using a trained vocabulary
at different granularity levels (instruction, basic block, or function).
The tool is designed to facilitate machine learning applications that work with
LLVM IR by converting the IR into numerical representations that can be used by
-ML models.
+ML models. The triplet mode generates numeric IDs directly instead of string
+triplets, streamlining the training data preparation workflow.
.. note::
@@ -34,18 +38,49 @@ ML models.
OPERATION MODES
---------------
+Triplet Generation and Entity Mapping Modes are used for preparing
+vocabulary and training data for knowledge graph embeddings. The Embedding Mode
+is used for generating embeddings from LLVM IR using a pre-trained vocabulary.
+
+The Seed Embedding Vocabulary of IR2Vec is trained on a large corpus of LLVM IR
+by modeling the relationships between opcodes, types, and operands as a knowledge
+graph. For this purpose, Triplet Generation and Entity Mapping Modes generate
+triplets and entity mappings in the standard format used for knowledge graph
+embedding training (see
+<https://github.com/thunlp/OpenKE/tree/OpenKE-PyTorch?tab=readme-ov-file#data-format>
+for details).
+
+See `llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py` for more details on how
+these two modes are used to generate the triplets and entity mappings.
+
Triplet Generation Mode
~~~~~~~~~~~~~~~~~~~~~~~
-In triplet mode, :program:`llvm-ir2vec` analyzes LLVM IR and extracts triplets
-consisting of opcodes, types, and operands. These triplets can be used to train
-vocabularies for embedding generation.
+In triplet mode, :program:`llvm-ir2vec` analyzes LLVM IR and extracts numeric
+triplets consisting of opcode IDs, type IDs, and operand IDs. These triplets
+are generated in the standard format used for knowledge graph embedding training.
+The tool outputs numeric IDs directly using the ir2vec::Vocabulary mapping
+infrastructure, eliminating the need for string-to-ID preprocessing.
Usage:
.. code-block:: bash
- llvm-ir2vec --mode=triplets input.bc -o triplets.txt
+ llvm-ir2vec --mode=triplets input.bc -o triplets_train2id.txt
+
+Entity Mapping Generation Mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In entity mode, :program:`llvm-ir2vec` generates the entity mappings supported by
+IR2Vec in the standard format used for knowledge graph embedding training. This
+mode outputs all supported entities (opcodes, types, and operands) with their
+corresponding numeric IDs, and is not specific to any particular LLVM IR file.
+
+Usage:
+
+.. code-block:: bash
+
+ llvm-ir2vec --mode=entities -o entity2id.txt
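In practice the two vocabulary-preparation modes are run together; a minimal
sketch of such a run (file names are placeholders; see
``llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py`` for the full pipeline)::

  # Entity mappings are generated once, independently of any particular module.
  llvm-ir2vec --mode=entities -o entity2id.txt
  # Triplets are generated per module in train2id format.
  llvm-ir2vec --mode=triplets input.bc -o triplets_train2id.txt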
Embedding Generation Mode
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -67,6 +102,7 @@ OPTIONS
Specify the operation mode. Valid values are:
* ``triplets`` - Generate triplets for vocabulary training
+ * ``entities`` - Generate entity mappings for vocabulary training
* ``embeddings`` - Generate embeddings using trained vocabulary (default)
.. option:: --level=<level>
@@ -115,7 +151,7 @@ OPTIONS
``--level``, ``--function``, ``--ir2vec-vocab-path``, ``--ir2vec-opc-weight``,
``--ir2vec-type-weight``, and ``--ir2vec-arg-weight`` are only used in embedding
- mode. These options are ignored in triplet mode.
+ mode. These options are ignored in triplet and entity modes.
INPUT FILE FORMAT
-----------------
@@ -129,14 +165,34 @@ OUTPUT FORMAT
Triplet Mode Output
~~~~~~~~~~~~~~~~~~~
-In triplet mode, the output consists of lines containing space-separated triplets:
+In triplet mode, the output consists of numeric triplets in train2id format with
+metadata headers. The format includes:
+
+.. code-block:: text
+
+ MAX_RELATIONS=<max_relations_count>
+ <head_entity_id> <tail_entity_id> <relation_id>
+ <head_entity_id> <tail_entity_id> <relation_id>
+ ...
+
+Each line after the metadata header represents one instruction relationship,
+with numeric IDs for the head entity, tail entity, and relation. The metadata
+header (MAX_RELATIONS) provides counts for post-processing and training setup.
+
+Entity Mode Output
+~~~~~~~~~~~~~~~~~~
+
+In entity mode, the output consists of entity mappings in the following format:
.. code-block:: text
- <opcode> <type> <operand1> <operand2> ...
+ <total_entities>
+ <entity_string> <numeric_id>
+ <entity_string> <numeric_id>
+ ...
-Each line represents the information of one instruction, with the opcode, type,
-and operands.
+The first line contains the total number of entities, followed by one entity
+mapping per line with tab-separated entity string and numeric ID.
Embedding Mode Output
~~~~~~~~~~~~~~~~~~~~~
diff --git a/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst b/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
index 2e199a0..31ead45 100644
--- a/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
+++ b/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
@@ -14,117 +14,113 @@ targets are welcome.
The instructions in this document depend on libraries and programs external to
LLVM, there are many ways to install and configure these dependencies so you
-may need to adapt the instructions here to fit your own local situation.
+may need to adapt the instructions here to fit your own situation.
Prerequisites
=============
-In this use case we'll be using cmake on a Debian-based Linux system,
-cross-compiling from an x86_64 host to a hard-float Armv7-A target. We'll be
+In this use case we will be using cmake on a Debian-based Linux system,
+cross-compiling from an x86_64 host to a hard-float Armv7-A target. We will be
using as many of the LLVM tools as we can, but it is possible to use GNU
equivalents.
- * ``A build of LLVM/clang for the llvm-tools and llvm-config``
- * ``A clang executable with support for the ARM target``
- * ``compiler-rt sources``
- * ``The qemu-arm user mode emulator``
- * ``An arm-linux-gnueabihf sysroot``
+You will need:
+ * A build of LLVM for the llvm-tools and ``llvm-config``.
+ * A clang executable with support for the ``ARM`` target.
+ * compiler-rt sources.
+ * The ``qemu-arm`` user mode emulator.
+ * An ``arm-linux-gnueabihf`` sysroot.
-In this example we will be using ninja.
+In this example we will be using ``ninja`` as the build tool.
-See https://compiler-rt.llvm.org/ for more information about the dependencies
+See https://compiler-rt.llvm.org/ for information about the dependencies
on clang and LLVM.
See https://llvm.org/docs/GettingStarted.html for information about obtaining
-the source for LLVM and compiler-rt. Note that the getting started guide
-places compiler-rt in the projects subdirectory, but this is not essential and
-if you are using the BaremetalARM.cmake cache for v6-M, v7-M and v7-EM then
-compiler-rt must be placed in the runtimes directory.
+the source for LLVM and compiler-rt.
``qemu-arm`` should be available as a package for your Linux distribution.
-The most complicated of the prerequisites to satisfy is the arm-linux-gnueabihf
+The most complicated of the prerequisites to satisfy is the ``arm-linux-gnueabihf``
sysroot. In theory it is possible to use the Linux distribution's multiarch
support to fulfill the dependencies for building, but unfortunately due to
-/usr/local/include being added some host includes are selected. The easiest way
-to supply a sysroot is to download the arm-linux-gnueabihf toolchain. This can
-be found at:
-* https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads for gcc 8 and above
-* https://releases.linaro.org/components/toolchain/binaries/ for gcc 4.9 to 7.3
+``/usr/local/include`` being added, some host includes are selected.
+
+The easiest way to supply a sysroot is to download an ``arm-linux-gnueabihf``
+toolchain from https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads.
Building compiler-rt builtins for Arm
=====================================
+
We will be doing a standalone build of compiler-rt using the following cmake
-options.
-
-* ``path/to/compiler-rt``
-* ``-G Ninja``
-* ``-DCMAKE_AR=/path/to/llvm-ar``
-* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
-* ``-DCMAKE_C_COMPILER=/path/to/clang``
-* ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_C_FLAGS="build-c-flags"``
-* ``-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld"``
-* ``-DCMAKE_NM=/path/to/llvm-nm``
-* ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
-* ``-DCOMPILER_RT_BUILD_BUILTINS=ON``
-* ``-DCOMPILER_RT_BUILD_LIBFUZZER=OFF``
-* ``-DCOMPILER_RT_BUILD_MEMPROF=OFF``
-* ``-DCOMPILER_RT_BUILD_PROFILE=OFF``
-* ``-DCOMPILER_RT_BUILD_SANITIZERS=OFF``
-* ``-DCOMPILER_RT_BUILD_XRAY=OFF``
-* ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
-* ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
+options::
+
+ cmake path/to/compiler-rt \
+ -G Ninja \
+ -DCMAKE_AR=/path/to/llvm-ar \
+ -DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf" \
+ -DCMAKE_ASM_FLAGS="build-c-flags" \
+ -DCMAKE_C_COMPILER=/path/to/clang \
+ -DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf" \
+ -DCMAKE_C_FLAGS="build-c-flags" \
+ -DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld" \
+ -DCMAKE_NM=/path/to/llvm-nm \
+ -DCMAKE_RANLIB=/path/to/llvm-ranlib \
+ -DCOMPILER_RT_BUILD_BUILTINS=ON \
+ -DCOMPILER_RT_BUILD_LIBFUZZER=OFF \
+ -DCOMPILER_RT_BUILD_MEMPROF=OFF \
+ -DCOMPILER_RT_BUILD_PROFILE=OFF \
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF \
+ -DCOMPILER_RT_BUILD_XRAY=OFF \
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON \
+ -DLLVM_CONFIG_PATH=/path/to/llvm-config
The ``build-c-flags`` need to be sufficient to pass the cmake compiler check,
compile compiler-rt, and if you are running the tests, compile and link the
tests. When cross-compiling with clang we will need to pass sufficient
-information to generate code for the Arm architecture we are targeting. We will
-need to select the Arm target, select the Armv7-A architecture and choose
-between using Arm or Thumb.
-instructions. For example:
+information to generate code for the Arm architecture we are targeting.
-* ``--target=arm-linux-gnueabihf``
-* ``-march=armv7a``
-* ``-mthumb``
+We will need to select:
+ * The Arm target and Armv7-A architecture with ``--target=arm-linux-gnueabihf -march=armv7a``.
+ * Whether to generate Arm (the default) or Thumb instructions (``-mthumb``).
-When using a GCC arm-linux-gnueabihf toolchain the following flags are
+When using a GCC ``arm-linux-gnueabihf`` toolchain the following flags are
needed to pick up the includes and libraries:
-* ``--gcc-toolchain=/path/to/dir/toolchain``
-* ``--sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc``
+ * ``--gcc-toolchain=/path/to/dir/toolchain``
+ * ``--sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc``
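Putting the options above together, a plausible ``build-c-flags`` value for
this example (adjust the toolchain paths to your installation) is::

  --target=arm-linux-gnueabihf -march=armv7a --gcc-toolchain=/path/to/dir/toolchain --sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc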
In this example we will be adding all of the command line options to both
``CMAKE_C_FLAGS`` and ``CMAKE_ASM_FLAGS``. There are cmake flags to pass some of
-these options individually which can be used to simplify the ``build-c-flags``:
+these options individually which can be used to simplify the ``build-c-flags``::
-* ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN=/path/to/dir/toolchain``
-* ``-DCMAKE_SYSROOT=/path/to/dir/toolchain/arm-linux-gnueabihf/libc``
+ -DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"
+ -DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"
+ -DCMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN=/path/to/dir/toolchain
+ -DCMAKE_SYSROOT=/path/to/dir/toolchain/arm-linux-gnueabihf/libc
Once cmake has completed, the builtins can be built with ``ninja builtins``.
Testing compiler-rt builtins using qemu-arm
===========================================
+
To test the builtins library we need to add a few more cmake flags to enable
testing and set up the compiler and flags for the test cases. We must also tell
-cmake that we wish to run the tests on ``qemu-arm``.
+cmake that we wish to run the tests on ``qemu-arm``::
-* ``-DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armhf/sysroot``
-* ``-DCOMPILER_RT_INCLUDE_TESTS=ON``
-* ``-DCOMPILER_RT_TEST_COMPILER="/path/to/clang"``
-* ``-DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"``
+ -DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armhf/sysroot"
+ -DCOMPILER_RT_INCLUDE_TESTS=ON
+ -DCOMPILER_RT_TEST_COMPILER="/path/to/clang"
+ -DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"
The ``/path/to/armhf/sysroot`` should be the same as the one passed to
-``--sysroot`` in the "build-c-flags".
+``--sysroot`` in the ``build-c-flags``.
-The "test-c-flags" need to include the target, architecture, gcc-toolchain,
-sysroot and arm/thumb state. The additional cmake defines such as
+The ``test-c-flags`` need to include the target, architecture, gcc-toolchain,
+sysroot and Arm/Thumb state. The additional cmake defines such as
``CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN`` do not apply when building the tests. If
-you have put all of these in "build-c-flags" then these can be repeated. If you
-wish to use lld to link the tests then add ``"-fuse-ld=lld``.
+you have put all of these in ``build-c-flags`` then these can be repeated. If you
+wish to use lld to link the tests then add ``-fuse-ld=lld``.
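As a sketch, a matching ``test-c-flags`` value could then be the same set of
options with the linker choice appended::

  --target=arm-linux-gnueabihf -march=armv7a --gcc-toolchain=/path/to/dir/toolchain --sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc -fuse-ld=lld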
Once cmake has completed, the tests can be built and run using
``ninja check-builtins``
@@ -142,19 +138,21 @@ This stage can often fail at link time if the ``--sysroot=`` and
``CMAKE_C_FLAGS`` and ``CMAKE_C_COMPILER_TARGET`` flags.
It can be useful to build a simple example outside of cmake with your toolchain
-to make sure it is working. For example: ``clang --target=arm-linux-gnueabi -march=armv7a --gcc-toolchain=/path/to/gcc-toolchain --sysroot=/path/to/gcc-toolchain/arm-linux-gnueabihf/libc helloworld.c``
+to make sure it is working. For example::
+
+ clang --target=arm-linux-gnueabi -march=armv7a --gcc-toolchain=/path/to/gcc-toolchain --sysroot=/path/to/gcc-toolchain/arm-linux-gnueabihf/libc helloworld.c
Clang uses the host header files
--------------------------------
On Debian-based systems it is possible to install multiarch support for
-arm-linux-gnueabi and arm-linux-gnueabihf. In many cases clang can successfully
+``arm-linux-gnueabi`` and ``arm-linux-gnueabihf``. In many cases clang can successfully
use this multiarch support when ``--gcc-toolchain=`` and ``--sysroot=`` are not supplied.
Unfortunately clang adds ``/usr/local/include`` before
``/usr/include/arm-linux-gnueabihf`` leading to errors when compiling the hosts
header files.
The multiarch support is not sufficient to build the builtins; you will need to
-use a separate arm-linux-gnueabihf toolchain.
+use a separate ``arm-linux-gnueabihf`` toolchain.
No target passed to clang
-------------------------
@@ -164,12 +162,13 @@ as ``error: unknown directive .syntax unified``.
You can check the clang invocation in the error message to see if there is no
``--target`` or if it is set incorrectly. The cause is usually
-``CMAKE_ASM_FLAGS`` not containing ``--target`` or ``CMAKE_ASM_COMPILER_TARGET`` not being present.
+``CMAKE_ASM_FLAGS`` not containing ``--target`` or ``CMAKE_ASM_COMPILER_TARGET``
+not being present.
Arm architecture not given
--------------------------
-The ``--target=arm-linux-gnueabihf`` will default to arm architecture v4t which
-cannot assemble the barrier instructions used in the synch_and_fetch source
+The ``--target=arm-linux-gnueabihf`` will default to Arm architecture v4t which
+cannot assemble the barrier instructions used in the ``synch_and_fetch`` source
files.
The cause is usually a missing ``-march=armv7a`` from the ``CMAKE_ASM_FLAGS``.
@@ -202,7 +201,7 @@ may need extra c-flags such as ``-mfloat-abi=softfp`` for use of floating-point
instructions, and ``-mfloat-abi=soft -mfpu=none`` for software floating-point
emulation.
-You will need to use an arm-linux-gnueabi GNU toolchain for soft-float.
+You will need to use an ``arm-linux-gnueabi`` GNU toolchain for soft-float.
AArch64 Target
--------------
@@ -220,8 +219,12 @@ Armv6-m, Armv7-m and Armv7E-M targets
To build and test the libraries using a similar method to Armv7-A is possible
but more difficult. The main problems are:
-* There isn't a ``qemu-arm`` user-mode emulator for bare-metal systems. The ``qemu-system-arm`` can be used but this is significantly more difficult to setup.
-* The targets to compile compiler-rt have the suffix -none-eabi. This uses the BareMetal driver in clang and by default won't find the libraries needed to pass the cmake compiler check.
+* There is not a ``qemu-arm`` user-mode emulator for bare-metal systems.
+ ``qemu-system-arm`` can be used but this is significantly more difficult
+ to set up.
+* The targets to compile compiler-rt have the suffix ``-none-eabi``. This uses
+ the BareMetal driver in clang and by default will not find the libraries
+ needed to pass the cmake compiler check.
As the Armv6-M, Armv7-M and Armv7E-M builds of compiler-rt only use instructions
that are supported on Armv7-A, we can still get most of the value of running the
@@ -233,32 +236,30 @@ builtins use instructions that are supported on Armv7-A but not Armv6-M,
Armv7-M and Armv7E-M.
To get the cmake compile test to pass you will need to pass the libraries
-needed to successfully link the cmake test via ``CMAKE_CFLAGS``. It is
-strongly recommended that you use version 3.6 or above of cmake so you can use
-``CMAKE_TRY_COMPILE_TARGET=STATIC_LIBRARY`` to skip the link step.
-
-* ``-DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY``
-* ``-DCOMPILER_RT_OS_DIR="baremetal"``
-* ``-DCOMPILER_RT_BUILD_BUILTINS=ON``
-* ``-DCOMPILER_RT_BUILD_SANITIZERS=OFF``
-* ``-DCOMPILER_RT_BUILD_XRAY=OFF``
-* ``-DCOMPILER_RT_BUILD_LIBFUZZER=OFF``
-* ``-DCOMPILER_RT_BUILD_PROFILE=OFF``
-* ``-DCMAKE_C_COMPILER=${host_install_dir}/bin/clang``
-* ``-DCMAKE_C_COMPILER_TARGET="your *-none-eabi target"``
-* ``-DCMAKE_ASM_COMPILER_TARGET="your *-none-eabi target"``
-* ``-DCMAKE_AR=/path/to/llvm-ar``
-* ``-DCMAKE_NM=/path/to/llvm-nm``
-* ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
-* ``-DCOMPILER_RT_BAREMETAL_BUILD=ON``
-* ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
-* ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
-* ``-DCMAKE_C_FLAGS="build-c-flags"``
-* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
-* ``-DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armv7-A/sysroot"``
-* ``-DCOMPILER_RT_INCLUDE_TESTS=ON``
-* ``-DCOMPILER_RT_TEST_COMPILER="/path/to/clang"``
-* ``-DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"``
+needed to successfully link the cmake test via ``CMAKE_CFLAGS``::
+
+ -DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY \
+ -DCOMPILER_RT_OS_DIR="baremetal" \
+ -DCOMPILER_RT_BUILD_BUILTINS=ON \
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF \
+ -DCOMPILER_RT_BUILD_XRAY=OFF \
+ -DCOMPILER_RT_BUILD_LIBFUZZER=OFF \
+ -DCOMPILER_RT_BUILD_PROFILE=OFF \
+ -DCMAKE_C_COMPILER=${host_install_dir}/bin/clang \
+ -DCMAKE_C_COMPILER_TARGET="your *-none-eabi target" \
+ -DCMAKE_ASM_COMPILER_TARGET="your *-none-eabi target" \
+ -DCMAKE_AR=/path/to/llvm-ar \
+ -DCMAKE_NM=/path/to/llvm-nm \
+ -DCMAKE_RANLIB=/path/to/llvm-ranlib \
+ -DCOMPILER_RT_BAREMETAL_BUILD=ON \
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON \
+ -DLLVM_CONFIG_PATH=/path/to/llvm-config \
+ -DCMAKE_C_FLAGS="build-c-flags" \
+ -DCMAKE_ASM_FLAGS="build-c-flags" \
+ -DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armv7-A/sysroot" \
+ -DCOMPILER_RT_INCLUDE_TESTS=ON \
+ -DCOMPILER_RT_TEST_COMPILER="/path/to/clang" \
+ -DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"
The Armv6-M builtins will use the soft-float ABI. When compiling the tests for
Armv7-A we must include ``"-mthumb -mfloat-abi=soft -mfpu=none"`` in the
@@ -267,25 +268,21 @@ test-c-flags. We must use an Armv7-A soft-float abi sysroot for ``qemu-arm``.
Depending on the linker used for the test cases you may encounter BuildAttribute
mismatches between the M-profile objects from compiler-rt and the A-profile
objects from the test. The lld linker does not check the profile
-BuildAttribute so it can be used to link the tests by adding -fuse-ld=lld to the
+BuildAttribute so it can be used to link the tests by adding ``-fuse-ld=lld`` to the
``COMPILER_RT_TEST_COMPILER_CFLAGS``.
Alternative using a cmake cache
-------------------------------
If you wish to build, but not test, compiler-rt for Armv6-M, Armv7-M or Armv7E-M
-the easiest way is to use the BaremetalARM.cmake recipe in clang/cmake/caches.
-
-You will need a bare metal sysroot such as that provided by the GNU ARM
-Embedded toolchain.
-
-The libraries can be built with the cmake options:
+the easiest way is to use the ``BaremetalARM.cmake`` recipe in ``clang/cmake/caches``.
-* ``-DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake``
-* ``/path/to/llvm``
+You will need a bare metal sysroot such as that provided by the GNU ARM Embedded
+toolchain.
-**Note** that for the recipe to work the compiler-rt source must be checked out
-into the directory llvm/runtimes. You will also need clang and lld checked out.
+The libraries can be built with the cmake options::
+ -DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake \
+ /path/to/llvm
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index eb2ef6b..28746bf 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -413,6 +413,8 @@ added in the future:
- On AArch64 the callee preserves all general purpose registers, except
X0-X8 and X16-X18. Not allowed with ``nest``.
+ - On RISC-V the callee preserves x5-x31, except the x6, x7 and x28 registers.
+
The idea behind this convention is to support calls to runtime functions
that have a hot path and a cold path. The hot path is usually a small piece
of code that doesn't use many registers. The cold path might need to call out to
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 021f321..0c49fc8 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -137,6 +137,9 @@ Changes to the LLVM tools
Changes to LLDB
---------------------------------
+* LLDB can now set breakpoints, show backtraces, and display variables when
+ debugging Wasm with supported runtimes (WAMR and V8).
+
Changes to BOLT
---------------------------------
diff --git a/llvm/docs/TableGen/BackGuide.rst b/llvm/docs/TableGen/BackGuide.rst
index 4828f9b..83f8f470 100644
--- a/llvm/docs/TableGen/BackGuide.rst
+++ b/llvm/docs/TableGen/BackGuide.rst
@@ -191,7 +191,7 @@ Some of these classes have additional members that
are described in the following subsections.
*All* of the classes derived from ``RecTy`` provide the ``get()`` function.
-It returns an instance of ``Recty`` corresponding to the derived class.
+It returns an instance of ``RecTy`` corresponding to the derived class.
Some of the ``get()`` functions require an argument to
specify which particular variant of the type is desired. These arguments are
described in the following subsections.
@@ -354,12 +354,12 @@ The class provides many additional functions:
* Functions to determine whether there are any operands and to get the
number of operands.
-* Functions to the get the operands, both individually and together.
+* Functions to get the operands, both individually and together.
* Functions to determine whether there are any names and to
get the number of names
-* Functions to the get the names, both individually and together.
+* Functions to get the names, both individually and together.
* Functions to get the operand iterator ``begin()`` and ``end()`` values.
@@ -605,7 +605,7 @@ null if the field does not exist.
The field is assumed to have another record as its value. That record is returned
as a pointer to a ``Record``. If the field does not exist or is unset, the
-functions returns null.
+function returns null.
Getting Record Superclasses
===========================
diff --git a/llvm/include/llvm/ADT/Any.h b/llvm/include/llvm/ADT/Any.h
index 88dbce9..a29aaa3 100644
--- a/llvm/include/llvm/ADT/Any.h
+++ b/llvm/include/llvm/ADT/Any.h
@@ -119,7 +119,6 @@ private:
template <class T> friend T any_cast(Any &&Value);
template <class T> friend const T *any_cast(const Any *Value);
template <class T> friend T *any_cast(Any *Value);
- template <typename T> friend bool any_isa(const Any &Value);
std::unique_ptr<StorageBase> Storage;
};
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index 956dcbc..93c6bfb 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/PassManager.h"
@@ -633,86 +634,25 @@ LLVM_ABI ModulePass *createDXILResourceWrapperPassPass();
// register slots to resources with implicit bindings, and in a
// post-optimization validation pass that will raise diagnostics about
// overlapping bindings.
-//
-// For example for these resource bindings:
-//
-// RWBuffer<float> A[10] : register(u3);
-// RWBuffer<float> B[] : register(u5, space2)
-//
-// The analysis result for UAV binding type will look like this:
-//
-// UAVSpaces {
-// ResClass = ResourceClass::UAV,
-// Spaces = {
-// { Space = 0, FreeRanges = {{ 0, 2 }, { 13, UINT32_MAX }} },
-// { Space = 2, FreeRanges = {{ 0, 4 }} }
-// }
-// }
-//
class DXILResourceBindingInfo {
-public:
- struct BindingRange {
- uint32_t LowerBound;
- uint32_t UpperBound;
- BindingRange(uint32_t LB, uint32_t UB) : LowerBound(LB), UpperBound(UB) {}
- };
-
- struct RegisterSpace {
- uint32_t Space;
- SmallVector<BindingRange> FreeRanges;
- RegisterSpace(uint32_t Space) : Space(Space) {
- FreeRanges.emplace_back(0, UINT32_MAX);
- }
- // Size == -1 means unbounded array
- LLVM_ABI std::optional<uint32_t> findAvailableBinding(int32_t Size);
- };
-
- struct BindingSpaces {
- dxil::ResourceClass RC;
- llvm::SmallVector<RegisterSpace> Spaces;
- BindingSpaces(dxil::ResourceClass RC) : RC(RC) {}
- LLVM_ABI RegisterSpace &getOrInsertSpace(uint32_t Space);
- };
-
-private:
- BindingSpaces SRVSpaces, UAVSpaces, CBufferSpaces, SamplerSpaces;
- bool ImplicitBinding;
- bool OverlappingBinding;
+ hlsl::BindingInfo Bindings;
+ bool HasImplicitBinding = false;
+ bool HasOverlappingBinding = false;
// Populate the resource binding info given explicit resource binding calls
// in the module.
void populate(Module &M, DXILResourceTypeMap &DRTM);
public:
- DXILResourceBindingInfo()
- : SRVSpaces(dxil::ResourceClass::SRV),
- UAVSpaces(dxil::ResourceClass::UAV),
- CBufferSpaces(dxil::ResourceClass::CBuffer),
- SamplerSpaces(dxil::ResourceClass::Sampler), ImplicitBinding(false),
- OverlappingBinding(false) {}
-
- bool hasImplicitBinding() const { return ImplicitBinding; }
- void setHasImplicitBinding(bool Value) { ImplicitBinding = Value; }
- bool hasOverlappingBinding() const { return OverlappingBinding; }
-
- BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) {
- switch (RC) {
- case dxil::ResourceClass::SRV:
- return SRVSpaces;
- case dxil::ResourceClass::UAV:
- return UAVSpaces;
- case dxil::ResourceClass::CBuffer:
- return CBufferSpaces;
- case dxil::ResourceClass::Sampler:
- return SamplerSpaces;
- }
+ bool hasImplicitBinding() const { return HasImplicitBinding; }
+ void setHasImplicitBinding(bool Value) { HasImplicitBinding = Value; }
+ bool hasOverlappingBinding() const { return HasOverlappingBinding; }
+ void setHasOverlappingBinding(bool Value) { HasOverlappingBinding = Value; }
- llvm_unreachable("Invalid resource class");
- }
-
- // Size == -1 means unbounded array
LLVM_ABI std::optional<uint32_t>
- findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size);
+ findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size) {
+ return Bindings.findAvailableBinding(RC, Space, Size);
+ }
friend class DXILResourceBindingAnalysis;
friend class DXILResourceBindingWrapperPass;
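As a rough usage sketch (the space and size values are hypothetical,
``assignRegister`` stands in for whatever the calling pass does with the slot,
and the analysis is assumed to be registered with the module analysis manager),
a consumer could request a free UAV slot like this::

  DXILResourceBindingInfo &DRBI = AM.getResult<DXILResourceBindingAnalysis>(M);
  if (std::optional<uint32_t> Slot = DRBI.findAvailableBinding(
          dxil::ResourceClass::UAV, /*Space=*/0, /*Size=*/10))
    assignRegister(*Slot); // hypothetical consumer of the returned slot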
diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h
index 498c19b..17f4112 100644
--- a/llvm/include/llvm/Analysis/IR2Vec.h
+++ b/llvm/include/llvm/Analysis/IR2Vec.h
@@ -164,37 +164,37 @@ public:
static_cast<unsigned>(OperandKind::MaxOperandKind);
Vocabulary() = default;
- Vocabulary(VocabVector &&Vocab);
+ LLVM_ABI Vocabulary(VocabVector &&Vocab);
- bool isValid() const;
- unsigned getDimension() const;
- size_t size() const;
+ LLVM_ABI bool isValid() const;
+ LLVM_ABI unsigned getDimension() const;
+ LLVM_ABI size_t size() const;
static size_t expectedSize() {
return MaxOpcodes + MaxTypeIDs + MaxOperandKinds;
}
/// Helper function to get vocabulary key for a given Opcode
- static StringRef getVocabKeyForOpcode(unsigned Opcode);
+ LLVM_ABI static StringRef getVocabKeyForOpcode(unsigned Opcode);
/// Helper function to get vocabulary key for a given TypeID
- static StringRef getVocabKeyForTypeID(Type::TypeID TypeID);
+ LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID);
/// Helper function to get vocabulary key for a given OperandKind
- static StringRef getVocabKeyForOperandKind(OperandKind Kind);
+ LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind);
/// Helper function to classify an operand into OperandKind
- static OperandKind getOperandKind(const Value *Op);
+ LLVM_ABI static OperandKind getOperandKind(const Value *Op);
/// Helpers to return the IDs of a given Opcode, TypeID, or OperandKind
- static unsigned getNumericID(unsigned Opcode);
- static unsigned getNumericID(Type::TypeID TypeID);
- static unsigned getNumericID(const Value *Op);
+ LLVM_ABI static unsigned getNumericID(unsigned Opcode);
+ LLVM_ABI static unsigned getNumericID(Type::TypeID TypeID);
+ LLVM_ABI static unsigned getNumericID(const Value *Op);
/// Accessors to get the embedding for a given entity.
- const ir2vec::Embedding &operator[](unsigned Opcode) const;
- const ir2vec::Embedding &operator[](Type::TypeID TypeId) const;
- const ir2vec::Embedding &operator[](const Value *Arg) const;
+ LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const;
+ LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeId) const;
+ LLVM_ABI const ir2vec::Embedding &operator[](const Value *Arg) const;
/// Const Iterator type aliases
using const_iterator = VocabVector::const_iterator;
@@ -221,13 +221,13 @@ public:
/// Returns the string key for a given index position in the vocabulary.
/// This is useful for debugging or printing the vocabulary. Do not use this
/// for embedding generation as string based lookups are inefficient.
- static StringRef getStringKey(unsigned Pos);
+ LLVM_ABI static StringRef getStringKey(unsigned Pos);
/// Create a dummy vocabulary for testing purposes.
- static VocabVector createDummyVocabForTest(unsigned Dim = 1);
+ LLVM_ABI static VocabVector createDummyVocabForTest(unsigned Dim = 1);
- bool invalidate(Module &M, const PreservedAnalyses &PA,
- ModuleAnalysisManager::Invalidator &Inv) const;
+ LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv) const;
};
/// Embedder provides the interface to generate embeddings (vector
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index bff7707..011d599 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -91,6 +91,10 @@ inline bind_ty<const SCEVUnknown> m_SCEVUnknown(const SCEVUnknown *&V) {
return V;
}
+inline bind_ty<const SCEVAddExpr> m_scev_Add(const SCEVAddExpr *&V) {
+ return V;
+}
+
/// Match a specified const SCEV *.
struct specificscev_ty {
const SCEV *Expr;
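A minimal sketch of the new matcher (``S`` is assumed to be a ``const SCEV *``
already obtained from ScalarEvolution)::

  using namespace llvm::SCEVPatternMatch;
  const SCEVAddExpr *Add = nullptr;
  if (match(S, m_scev_Add(Add))) {
    // Add is now bound to the matched SCEVAddExpr, and its operands can be
    // inspected directly.
  }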
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 7928835..aa4550d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1950,6 +1950,10 @@ public:
const Function &F,
SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const;
+ /// Returns true if GEP should not be used to index into vectors for this
+ /// target.
+ LLVM_ABI bool allowVectorElementIndexingUsingGEP() const;
+
private:
std::unique_ptr<const TargetTransformInfoImplBase> TTIImpl;
};
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 2ea87b3..abdbca0 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -1145,6 +1145,8 @@ public:
const Function &F,
SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {}
+ virtual bool allowVectorElementIndexingUsingGEP() const { return true; }
+
protected:
// Obtain the minimum required size to hold the value (without the sign)
// In case of a vector it returns the min required size for one element.
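A hypothetical sketch of a backend opting out of this behavior (the class name
is made up and the usual TTI constructor boilerplate is omitted)::

  class MyTargetTTIImpl final : public BasicTTIImplBase<MyTargetTTIImpl> {
  public:
    // Tell the middle end not to use GEPs to index into vectors on this target.
    bool allowVectorElementIndexingUsingGEP() const override { return false; }
  };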
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index b55c4e0..6781cd5 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -633,6 +633,9 @@ public:
return true;
}
+ /// Return true if this group is full, i.e. it has no gaps.
+ bool isFull() const { return getNumMembers() == getFactor(); }
+
private:
uint32_t Factor; // Interleave Factor.
bool Reverse;
diff --git a/llvm/include/llvm/BinaryFormat/COFF.h b/llvm/include/llvm/BinaryFormat/COFF.h
index f3b5d5e..64fe216 100644
--- a/llvm/include/llvm/BinaryFormat/COFF.h
+++ b/llvm/include/llvm/BinaryFormat/COFF.h
@@ -694,7 +694,24 @@ enum DLLCharacteristics : unsigned {
enum ExtendedDLLCharacteristics : unsigned {
/// Image is CET compatible
- IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT = 0x0001
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT = 0x0001,
+ /// Image is CET compatible in strict mode
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT_STRICT_MODE = 0x0002,
+ /// Image is CET compatible in such a way that context IP validation is
+ /// relaxed
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_SET_CONTEXT_IP_VALIDATION_RELAXED_MODE =
+ 0x0004,
+ /// Image is CET compatible in such a way that the use of
+ /// dynamic APIs is restricted to processes only
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_DYNAMIC_APIS_ALLOW_IN_PROC_ONLY = 0x0008,
+ /// Reserved for future use. Not used by MSVC link.exe
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_RESERVED_1 = 0x0010,
+ /// Reserved for future use. Not used by MSVC link.exe
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_RESERVED_2 = 0x0020,
+ /// Image is CFI compatible.
+ IMAGE_DLL_CHARACTERISTICS_EX_FORWARD_CFI_COMPAT = 0x0040,
+ /// Image is hotpatch compatible.
+ IMAGE_DLL_CHARACTERISTICS_EX_HOTPATCH_COMPATIBLE = 0x0080,
};
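A minimal sketch of consuming these bits (``ExtChars`` and the helper that
produces it are hypothetical; the value would come from the image's extended
DLL characteristics debug entry)::

  uint32_t ExtChars = readExtendedCharacteristics(Obj); // hypothetical helper
  bool HotPatchable =
      (ExtChars & COFF::IMAGE_DLL_CHARACTERISTICS_EX_HOTPATCH_COMPATIBLE) != 0;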
enum DebugType : unsigned {
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index ad35d7f..749971e 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -973,7 +973,10 @@ enum : unsigned {
// SM based processor values.
EF_CUDA_SM100 = 0x6400,
+ EF_CUDA_SM101 = 0x6500,
+ EF_CUDA_SM103 = 0x6700,
EF_CUDA_SM120 = 0x7800,
+ EF_CUDA_SM121 = 0x7900,
// Set when using an accelerator variant like sm_100a.
EF_CUDA_ACCELERATORS = 0x8,
diff --git a/llvm/include/llvm/BinaryFormat/SFrame.h b/llvm/include/llvm/BinaryFormat/SFrame.h
index 98dbe38..0c6c4d1 100644
--- a/llvm/include/llvm/BinaryFormat/SFrame.h
+++ b/llvm/include/llvm/BinaryFormat/SFrame.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"
@@ -49,29 +50,27 @@ enum class ABI : uint8_t {
/// SFrame FRE Types. Bits 0-3 of FuncDescEntry.Info.
enum class FREType : uint8_t {
- Addr1 = 0,
- Addr2 = 1,
- Addr4 = 2,
+#define HANDLE_SFRAME_FRE_TYPE(CODE, NAME) NAME = CODE,
+#include "llvm/BinaryFormat/SFrameConstants.def"
};
/// SFrame FDE Types. Bit 4 of FuncDescEntry.Info.
enum class FDEType : uint8_t {
- PCInc = 0,
- PCMask = 1,
+#define HANDLE_SFRAME_FDE_TYPE(CODE, NAME) NAME = CODE,
+#include "llvm/BinaryFormat/SFrameConstants.def"
};
/// Specifies the key used for signing return addresses. Bit 5 of
/// FuncDescEntry.Info.
enum class AArch64PAuthKey : uint8_t {
- A = 0,
- B = 1,
+#define HANDLE_SFRAME_AARCH64_PAUTH_KEY(CODE, NAME) NAME = CODE,
+#include "llvm/BinaryFormat/SFrameConstants.def"
};
-/// Size of stack offsets. Bits 5-6 of FREInfo.Info.
+/// Size of stack offsets. Bits 6-7 of FREInfo.Info.
enum class FREOffset : uint8_t {
- B1 = 0,
- B2 = 1,
- B4 = 2,
+#define HANDLE_SFRAME_FRE_OFFSET(CODE, NAME) NAME = CODE,
+#include "llvm/BinaryFormat/SFrameConstants.def"
};
/// Stack frame base register. Bit 0 of FREInfo.Info.
@@ -163,9 +162,13 @@ template <endianness E> using FrameRowEntryAddr1 = FrameRowEntry<uint8_t, E>;
template <endianness E> using FrameRowEntryAddr2 = FrameRowEntry<uint16_t, E>;
template <endianness E> using FrameRowEntryAddr4 = FrameRowEntry<uint32_t, E>;
-ArrayRef<EnumEntry<Version>> getVersions();
-ArrayRef<EnumEntry<Flags>> getFlags();
-ArrayRef<EnumEntry<ABI>> getABIs();
+LLVM_ABI ArrayRef<EnumEntry<Version>> getVersions();
+LLVM_ABI ArrayRef<EnumEntry<Flags>> getFlags();
+LLVM_ABI ArrayRef<EnumEntry<ABI>> getABIs();
+LLVM_ABI ArrayRef<EnumEntry<FREType>> getFRETypes();
+LLVM_ABI ArrayRef<EnumEntry<FDEType>> getFDETypes();
+LLVM_ABI ArrayRef<EnumEntry<AArch64PAuthKey>> getAArch64PAuthKeys();
+LLVM_ABI ArrayRef<EnumEntry<FREOffset>> getFREOffsets();
} // namespace sframe
} // namespace llvm
diff --git a/llvm/include/llvm/BinaryFormat/SFrameConstants.def b/llvm/include/llvm/BinaryFormat/SFrameConstants.def
index 643b15f..fddd440 100644
--- a/llvm/include/llvm/BinaryFormat/SFrameConstants.def
+++ b/llvm/include/llvm/BinaryFormat/SFrameConstants.def
@@ -6,8 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#if !(defined(HANDLE_SFRAME_VERSION) || defined(HANDLE_SFRAME_FLAG) || \
- defined(HANDLE_SFRAME_ABI))
+#if !(defined(HANDLE_SFRAME_VERSION) || defined(HANDLE_SFRAME_FLAG) || \
+ defined(HANDLE_SFRAME_ABI) || defined(HANDLE_SFRAME_FRE_TYPE) || \
+ defined(HANDLE_SFRAME_FDE_TYPE) || \
+ defined(HANDLE_SFRAME_AARCH64_PAUTH_KEY) || \
+ defined(HANDLE_SFRAME_FRE_OFFSET))
#error "Missing HANDLE_SFRAME definition"
#endif
@@ -23,6 +26,22 @@
#define HANDLE_SFRAME_ABI(CODE, NAME)
#endif
+#ifndef HANDLE_SFRAME_FRE_TYPE
+#define HANDLE_SFRAME_FRE_TYPE(CODE, NAME)
+#endif
+
+#ifndef HANDLE_SFRAME_FDE_TYPE
+#define HANDLE_SFRAME_FDE_TYPE(CODE, NAME)
+#endif
+
+#ifndef HANDLE_SFRAME_AARCH64_PAUTH_KEY
+#define HANDLE_SFRAME_AARCH64_PAUTH_KEY(CODE, NAME)
+#endif
+
+#ifndef HANDLE_SFRAME_FRE_OFFSET
+#define HANDLE_SFRAME_FRE_OFFSET(CODE, NAME)
+#endif
+
HANDLE_SFRAME_VERSION(0x01, V1)
HANDLE_SFRAME_VERSION(0x02, V2)
@@ -34,6 +53,24 @@ HANDLE_SFRAME_ABI(0x01, AArch64EndianBig)
HANDLE_SFRAME_ABI(0x02, AArch64EndianLittle)
HANDLE_SFRAME_ABI(0x03, AMD64EndianLittle)
+HANDLE_SFRAME_FRE_TYPE(0x00, Addr1)
+HANDLE_SFRAME_FRE_TYPE(0x01, Addr2)
+HANDLE_SFRAME_FRE_TYPE(0x02, Addr4)
+
+HANDLE_SFRAME_FDE_TYPE(0, PCInc)
+HANDLE_SFRAME_FDE_TYPE(1, PCMask)
+
+HANDLE_SFRAME_AARCH64_PAUTH_KEY(0, A)
+HANDLE_SFRAME_AARCH64_PAUTH_KEY(1, B)
+
+HANDLE_SFRAME_FRE_OFFSET(0, B1)
+HANDLE_SFRAME_FRE_OFFSET(1, B2)
+HANDLE_SFRAME_FRE_OFFSET(2, B4)
+
#undef HANDLE_SFRAME_VERSION
#undef HANDLE_SFRAME_FLAG
#undef HANDLE_SFRAME_ABI
+#undef HANDLE_SFRAME_FRE_TYPE
+#undef HANDLE_SFRAME_FDE_TYPE
+#undef HANDLE_SFRAME_AARCH64_PAUTH_KEY
+#undef HANDLE_SFRAME_FRE_OFFSET
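A minimal sketch of how a consumer can expand the new entries (the helper
function below is hypothetical; the in-tree ``getFRETypes()`` serves a similar
purpose)::

  static const char *getFRETypeName(llvm::sframe::FREType T) {
    switch (T) {
  #define HANDLE_SFRAME_FRE_TYPE(CODE, NAME)                                   \
    case llvm::sframe::FREType::NAME:                                          \
      return #NAME;
  #include "llvm/BinaryFormat/SFrameConstants.def"
    }
    return "unknown";
  }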
diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h
index 78f5eb4..5f53681 100644
--- a/llvm/include/llvm/Bitstream/BitstreamWriter.h
+++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h
@@ -466,7 +466,7 @@ private:
EmitCode(Abbrev);
- unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
+ unsigned i = 0, e = Abbv->getNumOperandInfos();
if (Code) {
assert(e && "Expected non-empty abbreviation");
const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
@@ -632,8 +632,7 @@ private:
void EncodeAbbrev(const BitCodeAbbrev &Abbv) {
EmitCode(bitc::DEFINE_ABBREV);
EmitVBR(Abbv.getNumOperandInfos(), 5);
- for (unsigned i = 0, e = static_cast<unsigned>(Abbv.getNumOperandInfos());
- i != e; ++i) {
+ for (unsigned i = 0, e = Abbv.getNumOperandInfos(); i != e; ++i) {
const BitCodeAbbrevOp &Op = Abbv.getOperandInfo(i);
Emit(Op.isLiteral(), 1);
if (Op.isLiteral()) {
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
index faab2503..91c0142 100644
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -190,6 +190,36 @@ private:
/// Emit comments in assembly output if this is true.
bool VerboseAsm;
+ /// Store symbols and type identifiers used to create callgraph section
+ /// entries related to a function.
+ struct FunctionInfo {
+ /// Numeric type identifier used in callgraph section for indirect calls
+ /// and targets.
+ using CGTypeId = uint64_t;
+
+ /// Enumeration of function kinds, and their mapping to function kind values
+ /// stored in callgraph section entries.
+ /// Must match the enum in llvm/tools/llvm-objdump/llvm-objdump.cpp.
+ enum class FunctionKind : uint64_t {
+ /// Function cannot be a target of indirect calls.
+ NOT_INDIRECT_TARGET = 0,
+
+ /// Function may be a target of indirect calls, but its type id is unknown.
+ INDIRECT_TARGET_UNKNOWN_TID = 1,
+
+ /// Function may be a target of indirect calls, and its type id is known.
+ INDIRECT_TARGET_KNOWN_TID = 2,
+ };
+
+ /// Map type identifiers to callsite labels. Labels are generated for each
+ /// indirect callsite in the function.
+ SmallVector<std::pair<CGTypeId, MCSymbol *>> CallSiteLabels;
+ };
+
+ enum CallGraphSectionFormatVersion : uint64_t {
+ V_0 = 0,
+ };
+
/// Output stream for the stack usage file (i.e., .su file).
std::unique_ptr<raw_fd_ostream> StackUsageStream;
@@ -355,6 +385,13 @@ public:
/// are available. Returns empty string otherwise.
StringRef getConstantSectionSuffix(const Constant *C) const;
+ /// Generate and emit labels for the callees of indirect callsites; these
+ /// labels are used to populate the .callgraph section.
+ void emitIndirectCalleeLabels(
+ FunctionInfo &FuncInfo,
+ const MachineFunction::CallSiteInfoMap &CallSitesInfoMap,
+ const MachineInstr &MI);
+
//===------------------------------------------------------------------===//
// XRay instrumentation implementation.
//===------------------------------------------------------------------===//
@@ -442,6 +479,8 @@ public:
void emitKCFITrapEntry(const MachineFunction &MF, const MCSymbol *Symbol);
virtual void emitKCFITypeId(const MachineFunction &MF);
+ void emitCallGraphSection(const MachineFunction &MF, FunctionInfo &FuncInfo);
+
void emitPseudoProbe(const MachineInstr &MI);
void emitRemarksSection(remarks::RemarkStreamer &RS);
diff --git a/llvm/include/llvm/CodeGen/GCMetadata.h b/llvm/include/llvm/CodeGen/GCMetadata.h
index 33f5301..5b9ee28 100644
--- a/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -101,12 +101,12 @@ private:
// are live per safe point (1.5% on 64-bit hosts).
public:
- GCFunctionInfo(const Function &F, GCStrategy &S);
- ~GCFunctionInfo();
+ LLVM_ABI GCFunctionInfo(const Function &F, GCStrategy &S);
+ LLVM_ABI ~GCFunctionInfo();
/// Handle invalidation explicitly.
- bool invalidate(Function &F, const PreservedAnalyses &PA,
- FunctionAnalysisManager::Invalidator &Inv);
+ LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
/// getFunction - Return the function to which this metadata applies.
const Function &getFunction() const { return F; }
@@ -163,8 +163,8 @@ public:
GCStrategyMap(GCStrategyMap &&) = default;
/// Handle invalidation explicitly.
- bool invalidate(Module &M, const PreservedAnalyses &PA,
- ModuleAnalysisManager::Invalidator &Inv);
+ LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv);
using iterator = MapT::iterator;
using const_iterator = MapT::const_iterator;
@@ -205,7 +205,7 @@ class CollectorMetadataAnalysis
public:
using Result = GCStrategyMap;
- Result run(Module &M, ModuleAnalysisManager &MAM);
+ LLVM_ABI Result run(Module &M, ModuleAnalysisManager &MAM);
};
/// An analysis pass which caches information about the Function.
@@ -217,7 +217,7 @@ class GCFunctionAnalysis : public AnalysisInfoMixin<GCFunctionAnalysis> {
public:
using Result = GCFunctionInfo;
- Result run(Function &F, FunctionAnalysisManager &FAM);
+ LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
};
/// LowerIntrinsics - This pass rewrites calls to the llvm.gcread or
@@ -228,7 +228,7 @@ public:
/// This pass requires `CollectorMetadataAnalysis`.
class GCLoweringPass : public PassInfoMixin<GCLoweringPass> {
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
/// An analysis pass which caches information about the entire Module.
@@ -244,7 +244,7 @@ public:
/// Lookup the GCStrategy object associated with the given gc name.
/// Objects are owned internally; No caller should attempt to delete the
/// returned objects.
- GCStrategy *getGCStrategy(const StringRef Name);
+ LLVM_ABI GCStrategy *getGCStrategy(const StringRef Name);
/// List of per function info objects. In theory, Each of these
/// may be associated with a different GC.
@@ -265,14 +265,14 @@ private:
public:
using iterator = SmallVector<std::unique_ptr<GCStrategy>, 1>::const_iterator;
- static char ID;
+ LLVM_ABI static char ID;
- GCModuleInfo();
+ LLVM_ABI GCModuleInfo();
/// clear - Resets the pass. Any pass, which uses GCModuleInfo, should
/// call it in doFinalization().
///
- void clear();
+ LLVM_ABI void clear();
/// begin/end - Iterators for used strategies.
///
@@ -282,7 +282,7 @@ public:
/// get - Look up function metadata. This is currently assumed
/// have the side effect of initializing the associated GCStrategy. That
/// will soon change.
- GCFunctionInfo &getFunctionInfo(const Function &F);
+ LLVM_ABI GCFunctionInfo &getFunctionInfo(const Function &F);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 571ec6d..4292c0b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -26,9 +26,9 @@ namespace llvm {
/// A base class for all GenericMachineInstrs.
class GenericMachineInstr : public MachineInstr {
- constexpr static unsigned PoisonFlags = NoUWrap | NoSWrap | NoUSWrap |
- IsExact | Disjoint | NonNeg |
- FmNoNans | FmNoInfs | SameSign;
+ constexpr static unsigned PoisonFlags =
+ NoUWrap | NoSWrap | NoUSWrap | IsExact | Disjoint | NonNeg | FmNoNans |
+ FmNoInfs | SameSign | InBounds;
public:
GenericMachineInstr() = delete;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 756c0b2..99d3cd0 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -518,6 +518,21 @@ public:
const SrcOp &Op1,
std::optional<unsigned> Flags = std::nullopt);
+ /// Build and insert an instruction with appropriate flags for addressing some
+ /// offset of an object, i.e.: \p Res = nuw inbounds G_PTR_ADD \p Op0, \p Op1
+ /// The value of \p Op0 must be a pointer into or just after an object, and
+ /// adding the value of \p Op1 to it must yield a pointer into or just after
+ /// the same object.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+ /// type.
+ /// \pre \p Op1 must be a generic virtual register with scalar type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0,
+ const SrcOp &Op1);
+
/// Materialize and insert \p Res = G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
///
/// G_PTR_ADD adds \p Value bytes to the pointer specified by \p Op0,
@@ -534,10 +549,29 @@ public:
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- std::optional<MachineInstrBuilder> materializePtrAdd(Register &Res,
- Register Op0,
- const LLT ValueTy,
- uint64_t Value);
+ std::optional<MachineInstrBuilder>
+ materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy,
+ uint64_t Value,
+ std::optional<unsigned> Flags = std::nullopt);
+
+ /// Materialize and insert an instruction with appropriate flags for
+ /// addressing some offset of an object, i.e.:
+ /// \p Res = nuw inbounds G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
+ /// The value of \p Op0 must be a pointer into or just after an object, and
+ /// adding \p Value to it must yield a pointer into or just after the same
+ /// object.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Op0 must be a generic virtual register with pointer type.
+ /// \pre \p ValueTy must be a scalar type.
+ /// \pre \p Res must be 0. This is to detect confusion between
+ /// materializeObjectPtrOffset() and buildObjectPtrOffset().
+ /// \post \p Res will either be a new generic virtual register of the same
+ /// type as \p Op0 or \p Op0 itself.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ std::optional<MachineInstrBuilder>
+ materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy,
+ uint64_t Value);
/// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
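A minimal sketch of the new builder (``MIRBuilder``, ``PtrTy``, ``PtrReg`` and
``OffsetReg`` are assumed from the surrounding GlobalISel code)::

  // PtrReg points into an object and adding OffsetReg keeps the result inside
  // it, so the object-offset form is valid and carries the nuw/inbounds flags.
  Register NewPtr =
      MIRBuilder.buildObjectPtrOffset(PtrTy, PtrReg, OffsetReg).getReg(0);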
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 938d71d..9e3d919 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -323,10 +323,11 @@ public:
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
- /// Returns true if the original IR terminator is an `indirectbr`. This
- /// typically corresponds to a `goto` in C, rather than jump tables.
- bool terminatorIsComputedGoto() const {
- return back().isIndirectBranch() &&
+ /// Returns true if the original IR terminator is an `indirectbr` with
+ /// successor blocks. This typically corresponds to a `goto` in C, rather than
+ /// jump tables.
+ bool terminatorIsComputedGotoWithSuccessors() const {
+ return back().isIndirectBranch() && !succ_empty() &&
llvm::all_of(successors(), [](const MachineBasicBlock *Succ) {
return Succ->isIRBlockAddressTaken();
});
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index 7f88323..06c4daf 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -517,6 +517,13 @@ public:
SmallVector<ArgRegPair, 1> ArgRegPairs;
/// Callee type ids.
SmallVector<ConstantInt *, 4> CalleeTypeIds;
+
+ CallSiteInfo() = default;
+
+ /// Extracts the numeric type id from the CallBase's callee_type Metadata,
+ /// and sets CalleeTypeIds. This is used as type id for the indirect call in
+ /// the call graph section.
+ CallSiteInfo(const CallBase &CB);
};
struct CalledGlobalInfo {
@@ -524,11 +531,12 @@ public:
unsigned TargetFlags;
};
+ using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
+
private:
Delegate *TheDelegate = nullptr;
GISelChangeObserver *Observer = nullptr;
- using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
/// Map a call instruction to call site arguments forwarding info.
CallSiteInfoMap CallSitesInfo;
diff --git a/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
index 1d954cf..1982ac6 100644
--- a/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ b/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -48,7 +48,7 @@ public:
class FreeMachineFunctionPass : public PassInfoMixin<FreeMachineFunctionPass> {
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // namespace llvm
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 94d04b8..10a9b1f 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -122,7 +122,9 @@ public:
Disjoint = 1 << 19, // Each bit is zero in at least one of the inputs.
NoUSWrap = 1 << 20, // Instruction supports geps
// no unsigned signed wrap.
- SameSign = 1 << 21 // Both operands have the same sign.
+ SameSign = 1 << 21, // Both operands have the same sign.
+ InBounds = 1 << 22 // Pointer arithmetic remains inbounds.
+ // Implies NoUSWrap.
};
private:
diff --git a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index e63e77a..e705d7d9 100644
--- a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -69,6 +69,32 @@ enum {
} // end namespace RegState
+/// Set of metadata that should be preserved when using BuildMI(). This provides
+/// a more convenient way of preserving DebugLoc, PCSections and MMRA.
+class MIMetadata {
+public:
+ MIMetadata() = default;
+ MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr)
+ : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {}
+ MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr,
+ MDNode *MMRA = nullptr)
+ : DL(DI), PCSections(PCSections), MMRA(MMRA) {}
+ explicit MIMetadata(const Instruction &From)
+ : DL(From.getDebugLoc()),
+ PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}
+ explicit MIMetadata(const MachineInstr &From)
+ : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {}
+
+ const DebugLoc &getDL() const { return DL; }
+ MDNode *getPCSections() const { return PCSections; }
+ MDNode *getMMRAMetadata() const { return MMRA; }
+
+private:
+ DebugLoc DL;
+ MDNode *PCSections = nullptr;
+ MDNode *MMRA = nullptr;
+};
+
class MachineInstrBuilder {
MachineFunction *MF = nullptr;
MachineInstr *MI = nullptr;
@@ -317,15 +343,11 @@ public:
}
}
- const MachineInstrBuilder &setPCSections(MDNode *MD) const {
- if (MD)
- MI->setPCSections(*MF, MD);
- return *this;
- }
-
- const MachineInstrBuilder &setMMRAMetadata(MDNode *MMRA) const {
- if (MMRA)
- MI->setMMRAMetadata(*MF, MMRA);
+ const MachineInstrBuilder &copyMIMetadata(const MIMetadata &MIMD) const {
+ if (MIMD.getPCSections())
+ MI->setPCSections(*MF, MIMD.getPCSections());
+ if (MIMD.getMMRAMetadata())
+ MI->setMMRAMetadata(*MF, MIMD.getMMRAMetadata());
return *this;
}
@@ -343,38 +365,11 @@ public:
}
};
-/// Set of metadata that should be preserved when using BuildMI(). This provides
-/// a more convenient way of preserving DebugLoc, PCSections and MMRA.
-class MIMetadata {
-public:
- MIMetadata() = default;
- MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr)
- : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {}
- MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr,
- MDNode *MMRA = nullptr)
- : DL(DI), PCSections(PCSections), MMRA(MMRA) {}
- explicit MIMetadata(const Instruction &From)
- : DL(From.getDebugLoc()),
- PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}
- explicit MIMetadata(const MachineInstr &From)
- : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {}
-
- const DebugLoc &getDL() const { return DL; }
- MDNode *getPCSections() const { return PCSections; }
- MDNode *getMMRAMetadata() const { return MMRA; }
-
-private:
- DebugLoc DL;
- MDNode *PCSections = nullptr;
- MDNode *MMRA = nullptr;
-};
-
/// Builder interface. Specify how to create the initial instruction itself.
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
const MCInstrDesc &MCID) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ .copyMIMetadata(MIMD);
}
/// This version of the builder sets up the first operand as a
@@ -382,8 +377,7 @@ inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
const MCInstrDesc &MCID, Register DestReg) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
+ .copyMIMetadata(MIMD)
.addReg(DestReg, RegState::Define);
}
@@ -397,10 +391,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
- .addReg(DestReg, RegState::Define);
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD).addReg(
+ DestReg, RegState::Define);
}
/// This version of the builder inserts the newly-built instruction before
@@ -416,10 +408,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
- .addReg(DestReg, RegState::Define);
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD).addReg(
+ DestReg, RegState::Define);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
@@ -449,9 +439,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
@@ -461,9 +449,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
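A minimal sketch of the builder after this change (``ExistingMI``, ``MBB``,
``InsertPt``, ``TII``, ``DstReg`` and ``SrcReg`` are assumed from the
surrounding pass)::

  // Capture DebugLoc, PCSections and MMRA metadata from an existing
  // instruction; the BuildMI overloads now forward all of it via
  // copyMIMetadata().
  MIMetadata MIMD(ExistingMI);
  BuildMI(MBB, InsertPt, MIMD, TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg);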
diff --git a/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/llvm/include/llvm/CodeGen/MachineInstrBundle.h
index ebf7534..65eb5c4 100644
--- a/llvm/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -297,8 +297,8 @@ LLVM_ABI PhysRegInfo AnalyzePhysRegInBundle(const MachineInstr &MI,
class FinalizeBundleTestPass : public PassInfoMixin<FinalizeBundleTestPass> {
public:
- PreservedAnalyses run(MachineFunction &MF,
- MachineFunctionAnalysisManager &MFAM);
+ LLVM_ABI PreservedAnalyses run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM);
};
} // End llvm namespace
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 8f88811..11ae8cd 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -182,7 +182,7 @@ public:
return SDValue(Node, R);
}
- /// Return true if this node is an operand of N.
+ /// Return true if the referenced return value is an operand of N.
LLVM_ABI bool isOperandOf(const SDNode *N) const;
/// Return the ValueType of the referenced return value.
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h
index 2834331..b865e02 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h
@@ -17,6 +17,7 @@
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
+#include "llvm/Support/Compiler.h"
#include <mutex>
@@ -33,7 +34,7 @@ class EPCIndirectionUtils {
public:
/// ABI support base class. Used to write resolver, stub, and trampoline
/// blocks.
- class ABISupport {
+ class LLVM_ABI ABISupport {
protected:
ABISupport(unsigned PointerSize, unsigned TrampolineSize, unsigned StubSize,
unsigned StubToPointerMaxDisplacement, unsigned ResolverCodeSize)
@@ -81,7 +82,7 @@ public:
CreateWithABI(ExecutorProcessControl &EPC);
/// Create based on the ExecutorProcessControl triple.
- static Expected<std::unique_ptr<EPCIndirectionUtils>>
+ LLVM_ABI static Expected<std::unique_ptr<EPCIndirectionUtils>>
Create(ExecutorProcessControl &EPC);
/// Create based on the ExecutorProcessControl triple.
@@ -98,27 +99,27 @@ public:
/// Release memory for resources held by this instance. This *must* be called
/// prior to destruction of the class.
- Error cleanup();
+ LLVM_ABI Error cleanup();
/// Write resolver code to the executor process and return its address.
/// This must be called before any call to createTrampolinePool or
/// createLazyCallThroughManager.
- Expected<ExecutorAddr> writeResolverBlock(ExecutorAddr ReentryFnAddr,
- ExecutorAddr ReentryCtxAddr);
+ LLVM_ABI Expected<ExecutorAddr>
+ writeResolverBlock(ExecutorAddr ReentryFnAddr, ExecutorAddr ReentryCtxAddr);
/// Returns the address of the Resolver block. Returns zero if the
/// writeResolverBlock method has not previously been called.
ExecutorAddr getResolverBlockAddress() const { return ResolverBlockAddr; }
/// Create an IndirectStubsManager for the executor process.
- std::unique_ptr<IndirectStubsManager> createIndirectStubsManager();
+ LLVM_ABI std::unique_ptr<IndirectStubsManager> createIndirectStubsManager();
/// Create a TrampolinePool for the executor process.
- TrampolinePool &getTrampolinePool();
+ LLVM_ABI TrampolinePool &getTrampolinePool();
/// Create a LazyCallThroughManager.
/// This function should only be called once.
- LazyCallThroughManager &
+ LLVM_ABI LazyCallThroughManager &
createLazyCallThroughManager(ExecutionSession &ES,
ExecutorAddr ErrorHandlerAddr);
@@ -170,7 +171,7 @@ private:
/// called.
///
/// This function is experimental and likely subject to revision.
-Error setUpInProcessLCTMReentryViaEPCIU(EPCIndirectionUtils &EPCIU);
+LLVM_ABI Error setUpInProcessLCTMReentryViaEPCIU(EPCIndirectionUtils &EPCIU);
namespace detail {
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h b/llvm/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h
index c92719e..a9f5c45 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h
@@ -16,6 +16,7 @@
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Speculation.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -24,8 +25,8 @@ namespace orc {
// Provides common code.
class SpeculateQuery {
protected:
- void findCalles(const BasicBlock *, DenseSet<StringRef> &);
- bool isStraightLine(const Function &F);
+ LLVM_ABI void findCalles(const BasicBlock *, DenseSet<StringRef> &);
+ LLVM_ABI bool isStraightLine(const Function &F);
public:
using ResultTy = std::optional<DenseMap<StringRef, DenseSet<StringRef>>>;
@@ -37,7 +38,7 @@ class BlockFreqQuery : public SpeculateQuery {
public:
// Find likely next executables based on IR Block Frequency
- ResultTy operator()(Function &F);
+ LLVM_ABI ResultTy operator()(Function &F);
};
// This Query generates a sequence of basic blocks which follows the order of
@@ -73,7 +74,7 @@ private:
VisitedBlocksInfoTy &);
public:
- ResultTy operator()(Function &F);
+ LLVM_ABI ResultTy operator()(Function &F);
};
} // namespace orc
diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h b/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h
new file mode 100644
index 0000000..70a2eeb
--- /dev/null
+++ b/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h
@@ -0,0 +1,162 @@
+//===- HLSLBinding.h - Representation for resource bindings in HLSL -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file contains objects to represent resource bindings.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_HLSL_HLSLBINDING_H
+#define LLVM_FRONTEND_HLSL_HLSLBINDING_H
+
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DXILABI.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace hlsl {
+
+/// BindingInfo represents the ranges of bindings and free space for each
+/// `dxil::ResourceClass`. This can represent HLSL-level bindings as well as
+/// bindings described in root signatures, and can be used for analysis of
+/// overlapping or missing bindings as well as for finding space for implicit
+/// bindings.
+///
+/// As an example, given these resource bindings:
+///
+/// RWBuffer<float> A[10] : register(u3);
+/// RWBuffer<float> B[] : register(u5, space2);
+///
+/// The binding info for UAV bindings should look like this:
+///
+/// UAVSpaces {
+/// ResClass = ResourceClass::UAV,
+/// Spaces = {
+/// { Space = 0u, FreeRanges = {{ 0u, 2u }, { 13u, ~0u }} },
+/// { Space = 2u, FreeRanges = {{ 0u, 4u }} }
+/// }
+/// }
+class BindingInfo {
+public:
+ struct BindingRange {
+ uint32_t LowerBound;
+ uint32_t UpperBound;
+ BindingRange(uint32_t LB, uint32_t UB) : LowerBound(LB), UpperBound(UB) {}
+ };
+
+ struct RegisterSpace {
+ uint32_t Space;
+ SmallVector<BindingRange> FreeRanges;
+ RegisterSpace(uint32_t Space) : Space(Space) {
+ FreeRanges.emplace_back(0, ~0u);
+ }
+ // Size == -1 means unbounded array
+ LLVM_ABI std::optional<uint32_t> findAvailableBinding(int32_t Size);
+ };
+
+ struct BindingSpaces {
+ dxil::ResourceClass RC;
+ llvm::SmallVector<RegisterSpace> Spaces;
+ BindingSpaces(dxil::ResourceClass RC) : RC(RC) {}
+ LLVM_ABI RegisterSpace &getOrInsertSpace(uint32_t Space);
+ };
+
+private:
+ BindingSpaces SRVSpaces{dxil::ResourceClass::SRV};
+ BindingSpaces UAVSpaces{dxil::ResourceClass::UAV};
+ BindingSpaces CBufferSpaces{dxil::ResourceClass::CBuffer};
+ BindingSpaces SamplerSpaces{dxil::ResourceClass::Sampler};
+
+public:
+ BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) {
+ switch (RC) {
+ case dxil::ResourceClass::SRV:
+ return SRVSpaces;
+ case dxil::ResourceClass::UAV:
+ return UAVSpaces;
+ case dxil::ResourceClass::CBuffer:
+ return CBufferSpaces;
+ case dxil::ResourceClass::Sampler:
+ return SamplerSpaces;
+ }
+
+ llvm_unreachable("Invalid resource class");
+ }
+ const BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) const {
+ return const_cast<BindingInfo *>(this)->getBindingSpaces(RC);
+ }
+
+ // Size == -1 means unbounded array
+ LLVM_ABI std::optional<uint32_t>
+ findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size);
+
+ friend class BindingInfoBuilder;
+};
+
+/// Builder class for creating a \c BindingInfo.
+class BindingInfoBuilder {
+public:
+ struct Binding {
+ dxil::ResourceClass RC;
+ uint32_t Space;
+ uint32_t LowerBound;
+ uint32_t UpperBound;
+ const void *Cookie;
+
+ Binding(dxil::ResourceClass RC, uint32_t Space, uint32_t LowerBound,
+ uint32_t UpperBound, const void *Cookie)
+ : RC(RC), Space(Space), LowerBound(LowerBound), UpperBound(UpperBound),
+ Cookie(Cookie) {}
+
+ bool isUnbounded() const { return UpperBound == ~0U; }
+
+ bool operator==(const Binding &RHS) const {
+ return std::tie(RC, Space, LowerBound, UpperBound, Cookie) ==
+ std::tie(RHS.RC, RHS.Space, RHS.LowerBound, RHS.UpperBound,
+ RHS.Cookie);
+ }
+ bool operator!=(const Binding &RHS) const { return !(*this == RHS); }
+
+ bool operator<(const Binding &RHS) const {
+ return std::tie(RC, Space, LowerBound) <
+ std::tie(RHS.RC, RHS.Space, RHS.LowerBound);
+ }
+ };
+
+private:
+ SmallVector<Binding> Bindings;
+
+public:
+ void trackBinding(dxil::ResourceClass RC, uint32_t Space, uint32_t LowerBound,
+ uint32_t UpperBound, const void *Cookie) {
+ Bindings.emplace_back(RC, Space, LowerBound, UpperBound, Cookie);
+ }
+ /// Calculate the binding info - \c ReportOverlap will be called once for each
+ /// overlapping binding.
+ BindingInfo calculateBindingInfo(
+ llvm::function_ref<void(const BindingInfoBuilder &Builder,
+ const Binding &Overlapping)>
+ ReportOverlap);
+
+ /// Calculate the binding info - \c HasOverlap will be set to indicate whether
+ /// there are any overlapping bindings.
+ BindingInfo calculateBindingInfo(bool &HasOverlap) {
+ HasOverlap = false;
+ return calculateBindingInfo(
+ [&HasOverlap](auto, auto) { HasOverlap = true; });
+ }
+
+ /// For use in the \c ReportOverlap callback of \c calculateBindingInfo -
+ /// finds a binding that the \c ReportedBinding overlaps with.
+ const Binding &findOverlapping(const Binding &ReportedBinding) const;
+};
+
+} // namespace hlsl
+} // namespace llvm
+
+#endif // LLVM_FRONTEND_HLSL_HLSLBINDING_H
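A minimal usage sketch of the BindingInfoBuilder/BindingInfo API added in the new header above, mirroring the A/B example from the class comment; the cookie values and the size-4 implicit-binding query are illustrative assumptions, not part of this patch:

#include "llvm/Frontend/HLSL/HLSLBinding.h"
using namespace llvm;

static hlsl::BindingInfo buildExampleBindings() {
  hlsl::BindingInfoBuilder Builder;
  // RWBuffer<float> A[10] : register(u3);        -> UAV space 0, slots 3..12
  Builder.trackBinding(dxil::ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/3,
                       /*UpperBound=*/12, /*Cookie=*/nullptr);
  // RWBuffer<float> B[] : register(u5, space2);  -> UAV space 2, unbounded
  Builder.trackBinding(dxil::ResourceClass::UAV, /*Space=*/2, /*LowerBound=*/5,
                       /*UpperBound=*/~0u, /*Cookie=*/nullptr);

  bool HasOverlap = false;
  hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
  // Free UAV ranges in space 0 are now {0..2} and {13..UINT32_MAX}, so a
  // size-4 implicit binding does not fit in {0..2} and lands at register 13.
  std::optional<uint32_t> Slot =
      Info.findAvailableBinding(dxil::ResourceClass::UAV, /*Space=*/0,
                                /*Size=*/4);
  (void)HasOverlap;
  (void)Slot;
  return Info;
}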
diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
index 6fa51ed..0bd0774 100644
--- a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
+++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
@@ -14,6 +14,7 @@
#ifndef LLVM_FRONTEND_HLSL_ROOTSIGNATUREMETADATA_H
#define LLVM_FRONTEND_HLSL_ROOTSIGNATUREMETADATA_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/HLSL/HLSLRootSignature.h"
#include "llvm/IR/Constants.h"
#include "llvm/MC/DXContainerRootSignature.h"
@@ -26,6 +27,80 @@ class Metadata;
namespace hlsl {
namespace rootsig {
+template <typename T>
+class RootSignatureValidationError
+ : public ErrorInfo<RootSignatureValidationError<T>> {
+public:
+ static char ID;
+ StringRef ParamName;
+ T Value;
+
+ RootSignatureValidationError(StringRef ParamName, T Value)
+ : ParamName(ParamName), Value(Value) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid value for " << ParamName << ": " << Value;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class GenericRSMetadataError : public ErrorInfo<GenericRSMetadataError> {
+public:
+ static char ID;
+ StringRef Message;
+ MDNode *MD;
+
+ GenericRSMetadataError(StringRef Message, MDNode *MD)
+ : Message(Message), MD(MD) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << Message;
+ if (MD) {
+ OS << "\n";
+ MD->printTree(OS);
+ }
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class InvalidRSMetadataFormat : public ErrorInfo<InvalidRSMetadataFormat> {
+public:
+ static char ID;
+ StringRef ElementName;
+
+ InvalidRSMetadataFormat(StringRef ElementName) : ElementName(ElementName) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid format for " << ElementName;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class InvalidRSMetadataValue : public ErrorInfo<InvalidRSMetadataValue> {
+public:
+ static char ID;
+ StringRef ParamName;
+
+ InvalidRSMetadataValue(StringRef ParamName) : ParamName(ParamName) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid value for " << ParamName;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
class MetadataBuilder {
public:
MetadataBuilder(llvm::LLVMContext &Ctx, ArrayRef<RootElement> Elements)
@@ -66,29 +141,27 @@ class MetadataParser {
public:
MetadataParser(MDNode *Root) : Root(Root) {}
- LLVM_ABI bool ParseRootSignature(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD);
+ LLVM_ABI llvm::Expected<llvm::mcdxbc::RootSignatureDesc>
+ ParseRootSignature(uint32_t Version);
private:
- bool parseRootFlags(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootFlagNode);
- bool parseRootConstants(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootConstantNode);
- bool parseRootDescriptors(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootDescriptorNode,
- RootSignatureElementKind ElementKind);
- bool parseDescriptorRange(LLVMContext *Ctx, mcdxbc::DescriptorTable &Table,
- MDNode *RangeDescriptorNode);
- bool parseDescriptorTable(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *DescriptorTableNode);
- bool parseRootSignatureElement(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *Element);
- bool parseStaticSampler(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *StaticSamplerNode);
-
- bool validateRootSignature(LLVMContext *Ctx,
- const llvm::mcdxbc::RootSignatureDesc &RSD);
+ llvm::Error parseRootFlags(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootFlagNode);
+ llvm::Error parseRootConstants(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootConstantNode);
+ llvm::Error parseRootDescriptors(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootDescriptorNode,
+ RootSignatureElementKind ElementKind);
+ llvm::Error parseDescriptorRange(mcdxbc::DescriptorTable &Table,
+ MDNode *RangeDescriptorNode);
+ llvm::Error parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *DescriptorTableNode);
+ llvm::Error parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *Element);
+ llvm::Error parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *StaticSamplerNode);
+
+ llvm::Error validateRootSignature(const llvm::mcdxbc::RootSignatureDesc &RSD);
MDNode *Root;
};
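A sketch of driving the reworked Expected-based parser interface; the metadata node and the root signature version passed in are placeholders for whatever the caller already has:

#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
#include "llvm/Support/Error.h"
using namespace llvm;

static Error parseRootSignatureNode(MDNode *RootSignatureNode) {
  hlsl::rootsig::MetadataParser Parser(RootSignatureNode);
  Expected<mcdxbc::RootSignatureDesc> RSDOrErr =
      Parser.ParseRootSignature(/*Version=*/2);
  if (!RSDOrErr)
    // Failures surface as GenericRSMetadataError, InvalidRSMetadataFormat,
    // InvalidRSMetadataValue or RootSignatureValidationError<T>.
    return RSDOrErr.takeError();
  mcdxbc::RootSignatureDesc &RSD = *RSDOrErr;
  (void)RSD;
  return Error::success();
}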
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
index 7919f7a..ce1cedc 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -578,8 +578,9 @@ struct DynamicAllocatorsT {
template <typename T, typename I, typename E> //
struct EnterT {
using List = ObjectListT<I, E>;
- using WrapperTrait = std::true_type;
- List v;
+ ENUM(Modifier, Automap);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Modifier), List> t;
};
// V5.2: [5.6.2] `exclusive` clause
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 1b94657..f11eccc 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -179,7 +179,7 @@ def OMPC_DynamicAllocators : Clause<[Spelling<"dynamic_allocators">]> {
let clangClass = "OMPDynamicAllocatorsClause";
}
def OMPC_Enter : Clause<[Spelling<"enter">]> {
- let flangClass = "OmpObjectList";
+ let flangClass = "OmpEnterClause";
}
def OMPC_Exclusive : Clause<[Spelling<"exclusive">]> {
let clangClass = "OMPExclusiveClause";
diff --git a/llvm/include/llvm/IR/GCStrategy.h b/llvm/include/llvm/IR/GCStrategy.h
index 6b81355..44e46e4 100644
--- a/llvm/include/llvm/IR/GCStrategy.h
+++ b/llvm/include/llvm/IR/GCStrategy.h
@@ -47,6 +47,7 @@
#ifndef LLVM_IR_GCSTRATEGY_H
#define LLVM_IR_GCSTRATEGY_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Registry.h"
#include <optional>
#include <string>
@@ -81,7 +82,7 @@ protected:
bool UsesMetadata = false; ///< If set, backend must emit metadata tables.
public:
- GCStrategy();
+ LLVM_ABI GCStrategy();
virtual ~GCStrategy() = default;
/// Return the name of the GC strategy. This is the value of the collector
@@ -145,7 +146,7 @@ using GCRegistry = Registry<GCStrategy>;
extern template class LLVM_TEMPLATE_ABI Registry<GCStrategy>;
/// Lookup the GCStrategy object associated with the given gc name.
-std::unique_ptr<GCStrategy> getGCStrategy(const StringRef Name);
+LLVM_ABI std::unique_ptr<GCStrategy> getGCStrategy(const StringRef Name);
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 0318427..2e13896 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -996,14 +996,6 @@ public:
return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getDestAlign() instead.
- LLVM_DEPRECATED("Use getDestAlign() instead", "getDestAlign")
- unsigned getDestAlignment() const {
- if (auto MA = getParamAlign(ARG_DEST))
- return MA->value();
- return 0;
- }
MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
/// Set the specified arguments of the instruction.
@@ -1057,15 +1049,6 @@ public:
return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
}
- /// FIXME: Remove this function once transition to Align is over.
- /// Use getSourceAlign() instead.
- LLVM_DEPRECATED("Use getSourceAlign() instead", "getSourceAlign")
- unsigned getSourceAlignment() const {
- if (auto MA = BaseCL::getParamAlign(ARG_SOURCE))
- return MA->value();
- return 0;
- }
-
MaybeAlign getSourceAlign() const {
return BaseCL::getParamAlign(ARG_SOURCE);
}
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 3a7db6d..7265a76 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -593,6 +593,14 @@ def int_amdgcn_tanh : DefaultAttrsIntrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
+def int_amdgcn_cvt_sr_pk_f16_f32 : DefaultAttrsIntrinsic<
+ [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_pk_f16_f32">;
+
+def int_amdgcn_cvt_sr_pk_bf16_f32 : DefaultAttrsIntrinsic<
+ [llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_pk_bf16_f32">;
+
def int_amdgcn_cvt_pk_f16_fp8 : DefaultAttrsIntrinsic<
[llvm_v2f16_ty], [llvm_i16_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_cvt_pk_f16_fp8">;
@@ -601,18 +609,57 @@ def int_amdgcn_cvt_pk_f16_bf8 : DefaultAttrsIntrinsic<
[llvm_v2f16_ty], [llvm_i16_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_cvt_pk_f16_bf8">;
-class AMDGPUCvtScaleF32Intrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
- [DstTy], [Src0Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+def int_amdgcn_cvt_pk_fp8_f16
+ : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_v2f16_ty],
+ [IntrNoMem, IntrSpeculatable]>,
+ ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f16">;
+
+def int_amdgcn_cvt_pk_bf8_f16
+ : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_v2f16_ty],
+ [IntrNoMem, IntrSpeculatable]>,
+ ClangBuiltin<"__builtin_amdgcn_cvt_pk_bf8_f16">;
+
+// llvm.amdgcn.cvt.sr.fp8.f16 i32 vdst, half src, i32 seed, i32 old, imm byte_sel [0..3]
+// byte_sel selects the byte to write into vdst.
+def int_amdgcn_cvt_sr_fp8_f16 : DefaultAttrsIntrinsic<
+ [llvm_i32_ty], [llvm_half_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f16">;
+
+// llvm.amdgcn.cvt.sr.bf8.f16 i32 vdst, half src, i32 seed, i32 old, imm byte_sel [0..3]
+// byte_sel selects the byte to write into vdst.
+def int_amdgcn_cvt_sr_bf8_f16 : DefaultAttrsIntrinsic<
+ [llvm_i32_ty], [llvm_half_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f16">;
+
+// llvm.amdgcn.cvt.scale.pk32.f16.bf6 v32f16 vdst, v6i32 src0, i32 scale_sel [0..7]
+class AMDGPUCvtScaleIntrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
-class AMDGPUCvtScaleF32ToFP6BF6Intrinsic<LLVMType DstTy, LLVMType Src0Ty, LLVMType Src1Ty, string name> : DefaultAttrsIntrinsic<
- [DstTy], [Src0Ty, Src1Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+class AMDGPUCvtScaleF32Intrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
class AMDGPUCvtScaleF32SRIntrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
[DstTy], [Src0Ty, llvm_i32_ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+def int_amdgcn_cvt_scale_pk8_f16_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_v2i32_ty, "cvt_scale_pk8_f16_fp8">;
+def int_amdgcn_cvt_scale_pk8_bf16_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_v2i32_ty, "cvt_scale_pk8_bf16_fp8">;
+def int_amdgcn_cvt_scale_pk8_f16_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_v2i32_ty, "cvt_scale_pk8_f16_bf8">;
+def int_amdgcn_cvt_scale_pk8_bf16_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_v2i32_ty, "cvt_scale_pk8_bf16_bf8">;
+def int_amdgcn_cvt_scale_pk8_f16_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_i32_ty, "cvt_scale_pk8_f16_fp4">;
+def int_amdgcn_cvt_scale_pk8_bf16_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_i32_ty, "cvt_scale_pk8_bf16_fp4">;
+def int_amdgcn_cvt_scale_pk8_f32_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_v2i32_ty, "cvt_scale_pk8_f32_fp8">;
+def int_amdgcn_cvt_scale_pk8_f32_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_v2i32_ty, "cvt_scale_pk8_f32_bf8">;
+def int_amdgcn_cvt_scale_pk8_f32_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_i32_ty, "cvt_scale_pk8_f32_fp4">;
+
+class AMDGPUCvtScaleF32ToFP6BF6Intrinsic<LLVMType DstTy, LLVMType Src0Ty, LLVMType Src1Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, Src1Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+
def int_amdgcn_cvt_scalef32_pk32_fp6_f16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_pk32_fp6_f16">;
def int_amdgcn_cvt_scalef32_pk32_bf6_f16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_pk32_bf6_f16">;
def int_amdgcn_cvt_scalef32_pk32_fp6_bf16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_pk32_fp6_bf16">;
@@ -3473,6 +3520,12 @@ def int_amdgcn_cvt_pk_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32">,
[llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
+// llvm.amdgcn.cvt.pk.fp8.f32.e5m3 int vdst, float srcA, float srcB, int old, imm word_sel
+def int_amdgcn_cvt_pk_fp8_f32_e5m3 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32_e5m3">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
// llvm.amdgcn.cvt.sr.bf8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
// byte_sel selects byte to write into vdst.
def int_amdgcn_cvt_sr_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f32">,
@@ -3486,6 +3539,12 @@ def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
[llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
+// llvm.amdgcn.cvt.sr.fp8.f32.e5m3 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
+def int_amdgcn_cvt_sr_fp8_f32_e5m3 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32_e5m3">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty],
+ [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
// llvm.amdgcn.cvt.off.fp32.i4 int srcA
def int_amdgcn_cvt_off_f32_i4: ClangBuiltin<"__builtin_amdgcn_cvt_off_f32_i4">,
DefaultAttrsIntrinsic<[llvm_float_ty],
@@ -3643,6 +3702,50 @@ def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
[IntrNoMem, IntrSpeculatable]
>;
+class AMDGPUAsyncGlobalLoadToLDS : Intrinsic <
+ [],
+ [global_ptr_ty, // Base global pointer to load from
+ local_ptr_ty, // LDS base pointer to store to.
+ llvm_i32_ty, // offset
+ llvm_i32_ty], // gfx12+ cachepolicy:
+ // bits [0-2] = th
+ // bits [3-4] = scope
+ [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<0>>,
+ NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree],
+ "", [SDNPMemOperand]
+>;
+
+class AMDGPUAsyncGlobalStoreFromLDS : Intrinsic <
+ [],
+ [global_ptr_ty, // Base global pointer to store to
+ local_ptr_ty, // LDS base pointer to load from
+ llvm_i32_ty, // offset
+ llvm_i32_ty], // gfx12+ cachepolicy:
+ // bits [0-2] = th
+ // bits [3-4] = scope
+ [IntrInaccessibleMemOrArgMemOnly, WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>, NoCapture<ArgIndex<0>>,
+ NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree],
+ "", [SDNPMemOperand]
+>;
+
+def int_amdgcn_global_load_async_to_lds_b8 :
+ ClangBuiltin<"__builtin_amdgcn_global_load_async_to_lds_b8">, AMDGPUAsyncGlobalLoadToLDS;
+def int_amdgcn_global_load_async_to_lds_b32 :
+ ClangBuiltin<"__builtin_amdgcn_global_load_async_to_lds_b32">, AMDGPUAsyncGlobalLoadToLDS;
+def int_amdgcn_global_load_async_to_lds_b64 :
+ ClangBuiltin<"__builtin_amdgcn_global_load_async_to_lds_b64">, AMDGPUAsyncGlobalLoadToLDS;
+def int_amdgcn_global_load_async_to_lds_b128 :
+ ClangBuiltin<"__builtin_amdgcn_global_load_async_to_lds_b128">, AMDGPUAsyncGlobalLoadToLDS;
+
+def int_amdgcn_global_store_async_from_lds_b8 :
+ ClangBuiltin<"__builtin_amdgcn_global_store_async_from_lds_b8">, AMDGPUAsyncGlobalStoreFromLDS;
+def int_amdgcn_global_store_async_from_lds_b32 :
+ ClangBuiltin<"__builtin_amdgcn_global_store_async_from_lds_b32">, AMDGPUAsyncGlobalStoreFromLDS;
+def int_amdgcn_global_store_async_from_lds_b64 :
+ ClangBuiltin<"__builtin_amdgcn_global_store_async_from_lds_b64">, AMDGPUAsyncGlobalStoreFromLDS;
+def int_amdgcn_global_store_async_from_lds_b128 :
+ ClangBuiltin<"__builtin_amdgcn_global_store_async_from_lds_b128">, AMDGPUAsyncGlobalStoreFromLDS;
+
// WMMA intrinsics.
class AMDGPUWmmaIntrinsicModsAB<LLVMType AB, LLVMType CD> :
Intrinsic<
diff --git a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
index 0fd5de3..11bfd73 100644
--- a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
+++ b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
@@ -408,7 +408,7 @@ inline APFloat::roundingMode GetRCPRoundingMode(Intrinsic::ID IntrinsicID) {
llvm_unreachable("Checking rounding mode for invalid rcp intrinsic");
}
-inline DenormalMode GetNVVMDenromMode(bool ShouldFTZ) {
+inline DenormalMode GetNVVMDenormMode(bool ShouldFTZ) {
if (ShouldFTZ)
return DenormalMode::getPreserveSign();
return DenormalMode::getIEEE();
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.h b/llvm/include/llvm/IR/RuntimeLibcalls.h
index 89ad4e5..eb882c4 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.h
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.h
@@ -134,7 +134,7 @@ struct RuntimeLibcallsInfo {
/// Check if this is valid libcall for the current module, otherwise
/// RTLIB::Unsupported.
- RTLIB::LibcallImpl getSupportedLibcallImpl(StringRef FuncName) const;
+ LLVM_ABI RTLIB::LibcallImpl getSupportedLibcallImpl(StringRef FuncName) const;
private:
static const RTLIB::LibcallImpl
diff --git a/llvm/include/llvm/LTO/LTO.h b/llvm/include/llvm/LTO/LTO.h
index d8e632b..323c478 100644
--- a/llvm/include/llvm/LTO/LTO.h
+++ b/llvm/include/llvm/LTO/LTO.h
@@ -542,21 +542,23 @@ private:
ArrayRef<SymbolResolution> Res, unsigned Partition,
bool InSummary);
- // These functions take a range of symbol resolutions [ResI, ResE) and consume
- // the resolutions used by a single input module by incrementing ResI. After
- // these functions return, [ResI, ResE) will refer to the resolution range for
- // the remaining modules in the InputFile.
- Error addModule(InputFile &Input, unsigned ModI,
- const SymbolResolution *&ResI, const SymbolResolution *ResE);
-
- Expected<RegularLTOState::AddedModule>
- addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
- const SymbolResolution *&ResI, const SymbolResolution *ResE);
+ // These functions take a range of symbol resolutions and consume the
+ // resolutions used by a single input module. They return ranges referring to
+ // the resolutions for the remaining modules in the InputFile.
+ Expected<ArrayRef<SymbolResolution>>
+ addModule(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ unsigned ModI, ArrayRef<SymbolResolution> Res);
+
+ Expected<std::pair<RegularLTOState::AddedModule, ArrayRef<SymbolResolution>>>
+ addRegularLTO(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res);
Error linkRegularLTO(RegularLTOState::AddedModule Mod,
bool LivenessFromIndex);
- Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
- const SymbolResolution *&ResI, const SymbolResolution *ResE);
+ Expected<ArrayRef<SymbolResolution>>
+ addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res);
Error runRegularLTO(AddStreamFn AddStream);
Error runThinLTO(AddStreamFn AddStream, FileCache Cache,
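A schematic of the consume-and-return-remainder pattern the new signatures imply (not the actual LTO.cpp code; the module count is assumed to come from the InputFile):

// Inside LTO, conceptually:
ArrayRef<SymbolResolution> Res = InputRes;
for (unsigned ModI = 0, E = NumModulesInInput; ModI != E; ++ModI) {
  Expected<ArrayRef<SymbolResolution>> Remaining =
      addModule(Input, InputRes, ModI, Res);
  if (!Remaining)
    return Remaining.takeError();
  Res = *Remaining; // resolutions left over for the modules not yet added
}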
diff --git a/llvm/include/llvm/MC/DXContainerRootSignature.h b/llvm/include/llvm/MC/DXContainerRootSignature.h
index 14a2429..3c7c886 100644
--- a/llvm/include/llvm/MC/DXContainerRootSignature.h
+++ b/llvm/include/llvm/MC/DXContainerRootSignature.h
@@ -10,6 +10,7 @@
#define LLVM_MC_DXCONTAINERROOTSIGNATURE_H
#include "llvm/BinaryFormat/DXContainer.h"
+#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <limits>
@@ -113,9 +114,9 @@ struct RootSignatureDesc {
mcdxbc::RootParametersContainer ParametersContainer;
SmallVector<dxbc::RTS0::v1::StaticSampler> StaticSamplers;
- void write(raw_ostream &OS) const;
+ LLVM_ABI void write(raw_ostream &OS) const;
- size_t getSize() const;
+ LLVM_ABI size_t getSize() const;
};
} // namespace mcdxbc
} // namespace llvm
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 467ad4e..4853701 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -209,7 +209,7 @@ public:
LLVM_ABI bool registerSection(MCSection &Section);
LLVM_ABI bool registerSymbol(const MCSymbol &Symbol);
- void addRelocDirective(RelocDirective RD);
+ LLVM_ABI void addRelocDirective(RelocDirective RD);
LLVM_ABI void reportError(SMLoc L, const Twine &Msg) const;
// Record pending errors during layout iteration, as they may go away once the
diff --git a/llvm/include/llvm/MC/MCObjectFileInfo.h b/llvm/include/llvm/MC/MCObjectFileInfo.h
index 5ce58ae..d69560c 100644
--- a/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -69,6 +69,9 @@ protected:
/// Language Specific Data Area information is emitted to.
MCSection *LSDASection = nullptr;
+ /// Section containing call graph metadata.
+ MCSection *CallGraphSection = nullptr;
+
/// If exception handling is supported by the target and the target can
/// support a compact representation of the CIE and FDE, this is the section
/// to emit them into.
@@ -359,6 +362,8 @@ public:
MCSection *getFaultMapSection() const { return FaultMapSection; }
MCSection *getRemarksSection() const { return RemarksSection; }
+ MCSection *getCallGraphSection(const MCSection &TextSec) const;
+
MCSection *getStackSizesSection(const MCSection &TextSec) const;
MCSection *getBBAddrMapSection(const MCSection &TextSec) const;
diff --git a/llvm/include/llvm/MC/MCObjectStreamer.h b/llvm/include/llvm/MC/MCObjectStreamer.h
index 5ac7aba..4b43a8f 100644
--- a/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -52,10 +52,6 @@ class MCObjectStreamer : public MCStreamer {
DenseMap<const MCSymbol *, SmallVector<PendingAssignment, 1>>
pendingAssignments;
- SmallVector<std::unique_ptr<char[]>, 0> FragStorage;
- // Available bytes in the current block for trailing data or new fragments.
- size_t FragSpace = 0;
-
void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &);
void emitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
void emitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
@@ -88,18 +84,11 @@ public:
// Add a fragment with a variable-size tail and start a new empty fragment.
void insert(MCFragment *F);
- char *getCurFragEnd() const {
- return reinterpret_cast<char *>(CurFrag + 1) + CurFrag->getFixedSize();
- }
- MCFragment *allocFragSpace(size_t Headroom);
// Add a new fragment to the current section without a variable-size tail.
void newFragment();
- void ensureHeadroom(size_t Headroom);
void appendContents(ArrayRef<char> Contents);
void appendContents(size_t Num, char Elt);
- // Add a fixup to the current fragment. Call ensureHeadroom beforehand to
- // ensure the fixup and appended content apply to the same fragment.
void addFixup(const MCExpr *Value, MCFixupKind Kind);
void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 3bd986f..df8f617b 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -93,7 +93,8 @@ protected:
// Track content and fixups for the fixed-size part as fragments are
// appended to the section. The content remains immutable, except when
// modified by applyFixup.
- uint32_t FixedSize = 0;
+ uint32_t ContentStart = 0;
+ uint32_t ContentEnd = 0;
uint32_t FixupStart = 0;
uint32_t FixupEnd = 0;
@@ -187,18 +188,30 @@ public:
//== Content-related functions manage parent's storage using ContentStart and
// ContentSize.
+ // Get a SmallVector reference. The caller should call doneAppending to update
+ // `ContentEnd`.
+ SmallVectorImpl<char> &getContentsForAppending();
+ void doneAppending();
+ void appendContents(ArrayRef<char> Contents) {
+ getContentsForAppending().append(Contents.begin(), Contents.end());
+ doneAppending();
+ }
+ void appendContents(size_t Num, char Elt) {
+ getContentsForAppending().append(Num, Elt);
+ doneAppending();
+ }
MutableArrayRef<char> getContents();
ArrayRef<char> getContents() const;
- void setVarContents(ArrayRef<char> Contents);
+ LLVM_ABI void setVarContents(ArrayRef<char> Contents);
void clearVarContents() { setVarContents({}); }
MutableArrayRef<char> getVarContents();
ArrayRef<char> getVarContents() const;
- size_t getFixedSize() const { return FixedSize; }
+ size_t getFixedSize() const { return ContentEnd - ContentStart; }
size_t getVarSize() const { return VarContentEnd - VarContentStart; }
size_t getSize() const {
- return FixedSize + (VarContentEnd - VarContentStart);
+ return ContentEnd - ContentStart + (VarContentEnd - VarContentStart);
}
//== Fixup-related functions manage parent's storage using FixupStart and
@@ -211,7 +224,7 @@ public:
// Source fixup offsets are relative to the variable part's start.
// Stored fixup offsets are relative to the fixed part's start.
- void setVarFixups(ArrayRef<MCFixup> Fixups);
+ LLVM_ABI void setVarFixups(ArrayRef<MCFixup> Fixups);
void clearVarFixups() { setVarFixups({}); }
MutableArrayRef<MCFixup> getVarFixups();
ArrayRef<MCFixup> getVarFixups() const;
@@ -621,11 +634,28 @@ public:
bool isBssSection() const { return IsBss; }
};
+inline SmallVectorImpl<char> &MCFragment::getContentsForAppending() {
+ SmallVectorImpl<char> &S = getParent()->ContentStorage;
+ if (LLVM_UNLIKELY(ContentEnd != S.size())) {
+ // Move the elements to the end. Reserve space to avoid invalidating
+ // S.begin()+I for `append`.
+ auto Size = ContentEnd - ContentStart;
+ auto I = std::exchange(ContentStart, S.size());
+ S.reserve(S.size() + Size);
+ S.append(S.begin() + I, S.begin() + I + Size);
+ }
+ return S;
+}
+inline void MCFragment::doneAppending() {
+ ContentEnd = getParent()->ContentStorage.size();
+}
inline MutableArrayRef<char> MCFragment::getContents() {
- return {reinterpret_cast<char *>(this + 1), FixedSize};
+ return MutableArrayRef(getParent()->ContentStorage)
+ .slice(ContentStart, ContentEnd - ContentStart);
}
inline ArrayRef<char> MCFragment::getContents() const {
- return {reinterpret_cast<const char *>(this + 1), FixedSize};
+ return ArrayRef(getParent()->ContentStorage)
+ .slice(ContentStart, ContentEnd - ContentStart);
}
inline MutableArrayRef<char> MCFragment::getVarContents() {
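An illustrative calling sequence for the new appending interface, assuming Frag is some MCFragment being filled in:

SmallVectorImpl<char> &Contents = Frag.getContentsForAppending();
Contents.push_back('\x90');                 // write bytes straight into the
Contents.append({'\x0f', '\x1f', '\x00'});  // section's ContentStorage...
Frag.doneAppending();                       // ...then publish the new ContentEnd
// The appendContents() overloads above wrap this pattern for the common case:
Frag.appendContents(/*Num=*/4, /*Elt=*/0);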
diff --git a/llvm/include/llvm/Object/SFrameParser.h b/llvm/include/llvm/Object/SFrameParser.h
index cf4fe20..245e7ba 100644
--- a/llvm/include/llvm/Object/SFrameParser.h
+++ b/llvm/include/llvm/Object/SFrameParser.h
@@ -11,6 +11,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/BinaryFormat/SFrame.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@@ -19,11 +20,14 @@ namespace object {
template <endianness E> class SFrameParser {
public:
- static Expected<SFrameParser> create(ArrayRef<uint8_t> Contents);
+ static Expected<SFrameParser> create(ArrayRef<uint8_t> Contents,
+ uint64_t SectionAddress);
const sframe::Preamble<E> &getPreamble() const { return Header.Preamble; }
const sframe::Header<E> &getHeader() const { return Header; }
+ Expected<ArrayRef<uint8_t>> getAuxHeader() const;
+
bool usesFixedRAOffset() const {
return getHeader().ABIArch == sframe::ABI::AMD64EndianLittle;
}
@@ -31,16 +35,29 @@ public:
return false; // Not used in any currently defined ABI.
}
+ using FDERange = ArrayRef<sframe::FuncDescEntry<E>>;
+ Expected<FDERange> fdes() const;
+
+ // Decodes the start address of the given FDE, which must be one of the
+ // objects returned by the `fdes()` function.
+ uint64_t getAbsoluteStartAddress(typename FDERange::iterator FDE) const;
+
private:
ArrayRef<uint8_t> Data;
+ uint64_t SectionAddress;
const sframe::Header<E> &Header;
- SFrameParser(ArrayRef<uint8_t> Data, const sframe::Header<E> &Header)
- : Data(Data), Header(Header) {}
+ SFrameParser(ArrayRef<uint8_t> Data, uint64_t SectionAddress,
+ const sframe::Header<E> &Header)
+ : Data(Data), SectionAddress(SectionAddress), Header(Header) {}
+
+ uint64_t getFDEBase() const {
+ return sizeof(Header) + Header.AuxHdrLen + Header.FDEOff;
+ }
};
-extern template class SFrameParser<endianness::big>;
-extern template class SFrameParser<endianness::little>;
+extern template class LLVM_TEMPLATE_ABI SFrameParser<endianness::big>;
+extern template class LLVM_TEMPLATE_ABI SFrameParser<endianness::little>;
} // end namespace object
} // end namespace llvm
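A small sketch of walking FDEs through the extended parser; the helper name and output format are made up for illustration:

#include "llvm/Object/SFrameParser.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

template <endianness E>
static Error dumpFDEStarts(ArrayRef<uint8_t> Contents, uint64_t SectionAddress,
                           raw_ostream &OS) {
  auto ParserOrErr = object::SFrameParser<E>::create(Contents, SectionAddress);
  if (!ParserOrErr)
    return ParserOrErr.takeError();
  auto FDEsOrErr = ParserOrErr->fdes();
  if (!FDEsOrErr)
    return FDEsOrErr.takeError();
  for (auto It = FDEsOrErr->begin(), End = FDEsOrErr->end(); It != End; ++It)
    OS << format_hex(ParserOrErr->getAbsoluteStartAddress(It), 10) << "\n";
  return Error::success();
}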
diff --git a/llvm/include/llvm/ObjectYAML/ELFYAML.h b/llvm/include/llvm/ObjectYAML/ELFYAML.h
index e883f2f..3bf8c29 100644
--- a/llvm/include/llvm/ObjectYAML/ELFYAML.h
+++ b/llvm/include/llvm/ObjectYAML/ELFYAML.h
@@ -117,7 +117,7 @@ struct FileHeader {
llvm::yaml::Hex8 ABIVersion;
ELF_ET Type;
std::optional<ELF_EM> Machine;
- ELF_EF Flags;
+ std::optional<ELF_EF> Flags;
llvm::yaml::Hex64 Entry;
std::optional<StringRef> SectionHeaderStringTable;
diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
index b0360f1..97c3ff8 100644
--- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
@@ -114,13 +114,16 @@
#include "llvm/Target/CGPassBuilderOption.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/CFGuard.h"
+#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar/ConstantHoisting.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
+#include "llvm/Transforms/Scalar/LoopTermFold.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Scalar/MergeICmps.h"
#include "llvm/Transforms/Scalar/PartiallyInlineLibCalls.h"
#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
+#include "llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/LowerInvoke.h"
#include <cassert>
@@ -754,7 +757,12 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addIRPasses(
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOptLevel::None && !Opt.DisableLSR) {
- addPass(createFunctionToLoopPassAdaptor(LoopStrengthReducePass(),
+ LoopPassManager LPM;
+ LPM.addPass(CanonicalizeFreezeInLoopsPass());
+ LPM.addPass(LoopStrengthReducePass());
+ if (Opt.EnableLoopTermFold)
+ LPM.addPass(LoopTermFoldPass());
+ addPass(createFunctionToLoopPassAdaptor(std::move(LPM),
/*UseMemorySSA=*/true));
}
@@ -799,7 +807,8 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addIRPasses(
addPass(ScalarizeMaskedMemIntrinPass());
// Expand reduction intrinsics into shuffle sequences if the target wants to.
- addPass(ExpandReductionsPass());
+ if (!Opt.DisableExpandReductions)
+ addPass(ExpandReductionsPass());
// Convert conditional moves to conditional jumps when profitable.
if (getOptLevel() != CodeGenOptLevel::None && !Opt.DisableSelectOptimize)
@@ -877,6 +886,9 @@ void CodeGenPassBuilder<Derived, TargetMachineT>::addISelPrepare(
if (Opt.RequiresCodeGenSCCOrder)
addPass.requireCGSCCOrder();
+ if (getOptLevel() != CodeGenOptLevel::None)
+ addPass(ObjCARCContractPass());
+
addPass(CallBrPreparePass());
// Add both the safe stack and the stack protection passes: each of them will
// only protect functions that have corresponding attributes.
diff --git a/llvm/include/llvm/ProfileData/MemProfData.inc b/llvm/include/llvm/ProfileData/MemProfData.inc
index 3f785bd..26baddd 100644
--- a/llvm/include/llvm/ProfileData/MemProfData.inc
+++ b/llvm/include/llvm/ProfileData/MemProfData.inc
@@ -33,11 +33,10 @@
(uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
// The version number of the raw binary format.
-#define MEMPROF_RAW_VERSION 4ULL
+#define MEMPROF_RAW_VERSION 5ULL
// Currently supported versions.
-#define MEMPROF_RAW_SUPPORTED_VERSIONS \
- { 3ULL, 4ULL }
+#define MEMPROF_RAW_SUPPORTED_VERSIONS {3ULL, 4ULL, 5ULL}
#define MEMPROF_V3_MIB_SIZE 132ULL;
@@ -229,6 +228,41 @@ void Merge(const MemInfoBlock &newMIB) {
} __attribute__((__packed__));
#endif
+constexpr int MantissaBits = 12;
+constexpr int ExponentBits = 4;
+constexpr uint16_t MaxMantissa = (1U << MantissaBits) - 1;
+constexpr uint16_t MaxExponent = (1U << ExponentBits) - 1;
+constexpr uint64_t MaxRepresentableValue = static_cast<uint64_t>(MaxMantissa)
+ << MaxExponent;
+
+// Encodes a 64-bit unsigned integer into a 16-bit scaled integer format.
+inline uint16_t encodeHistogramCount(uint64_t Count) {
+ if (Count == 0)
+ return 0;
+
+ if (Count > MaxRepresentableValue)
+ Count = MaxRepresentableValue;
+
+ if (Count <= MaxMantissa)
+ return Count;
+
+ uint64_t M = Count;
+ uint16_t E = 0;
+ while (M > MaxMantissa) {
+ M = (M + 1) >> 1;
+ E++;
+ }
+ return (E << MantissaBits) | static_cast<uint16_t>(M);
+}
+
+// Decodes a 16-bit scaled integer and returns the (possibly rounded) 64-bit
+// unsigned integer it represents.
+inline uint64_t decodeHistogramCount(uint16_t EncodedValue) {
+ const uint16_t E = EncodedValue >> MantissaBits;
+ const uint16_t M = EncodedValue & MaxMantissa;
+ return static_cast<uint64_t>(M) << E;
+}
+
} // namespace memprof
} // namespace llvm
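A quick worked example of the 12-bit-mantissa/4-bit-exponent encoding above; the concrete counts are chosen for illustration, and the two helpers are assumed to be in scope (e.g. via llvm/ProfileData/MemProf.h):

#include <cassert>
#include <cstdint>

void histogramEncodingExample() {
  using namespace llvm::memprof;
  // Counts up to MaxMantissa (4095) are stored exactly.
  assert(encodeHistogramCount(4095) == 4095 &&
         decodeHistogramCount(4095) == 4095);
  // 10000 halves (with rounding) twice down to mantissa 2500, exponent 2:
  // encoded = (2 << 12) | 2500 = 10692, decoded = 2500 << 2 = 10000.
  uint16_t Encoded = encodeHistogramCount(10000);
  assert(Encoded == 10692 && decodeHistogramCount(Encoded) == 10000);
  // Counts that are not exactly representable round up slightly:
  // 10001 encodes to mantissa 2501, exponent 2 and decodes to 10004.
  assert(decodeHistogramCount(encodeHistogramCount(10001)) == 10004);
}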
diff --git a/llvm/include/llvm/Support/AArch64AttributeParser.h b/llvm/include/llvm/Support/AArch64AttributeParser.h
index 796dbfd..f4552ef 100644
--- a/llvm/include/llvm/Support/AArch64AttributeParser.h
+++ b/llvm/include/llvm/Support/AArch64AttributeParser.h
@@ -34,7 +34,7 @@ struct AArch64BuildAttrSubsections {
uint32_t AndFeatures = 0;
};
-AArch64BuildAttrSubsections
+LLVM_ABI AArch64BuildAttrSubsections
extractBuildAttributesSubsections(const llvm::AArch64AttributeParser &);
} // namespace llvm
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 734b795..d976773 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -1069,7 +1069,7 @@ public:
/// Redirect each of the remapped files from first to second.
static std::unique_ptr<RedirectingFileSystem>
create(ArrayRef<std::pair<std::string, std::string>> RemappedFiles,
- bool UseExternalNames, FileSystem &ExternalFS);
+ bool UseExternalNames, IntrusiveRefCntPtr<FileSystem> ExternalFS);
ErrorOr<Status> status(const Twine &Path) override;
bool exists(const Twine &Path) override;
diff --git a/llvm/include/llvm/Support/Windows/WindowsSupport.h b/llvm/include/llvm/Support/Windows/WindowsSupport.h
index ffc6fdf..f35e7b5 100644
--- a/llvm/include/llvm/Support/Windows/WindowsSupport.h
+++ b/llvm/include/llvm/Support/Windows/WindowsSupport.h
@@ -245,6 +245,10 @@ LLVM_ABI std::error_code widenPath(const Twine &Path8,
SmallVectorImpl<wchar_t> &Path16,
size_t MaxPathLen = MAX_PATH);
+/// Retrieves the handle to an in-memory system module such as ntdll.dll,
+/// while ensuring we're not retrieving a maliciously injected module but one
+/// loaded from the system path.
+LLVM_ABI HMODULE loadSystemModuleSecure(LPCWSTR lpModuleName);
} // end namespace windows
} // end namespace sys
} // end namespace llvm.
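A minimal sketch of the intended call (Windows-only; the null-handle check and the follow-up GetProcAddress lookup are assumptions about usage, not part of this declaration):

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Windows/WindowsSupport.h"

void resolveNtDllEntryPoint() {
  HMODULE NtDll = llvm::sys::windows::loadSystemModuleSecure(L"ntdll.dll");
  if (!NtDll)
    llvm::report_fatal_error("could not load ntdll.dll from the system path");
  // Look up an export from the securely loaded module.
  ::FARPROC Fn = ::GetProcAddress(NtDll, "RtlGetVersion");
  (void)Fn;
}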
diff --git a/llvm/include/llvm/Target/CGPassBuilderOption.h b/llvm/include/llvm/Target/CGPassBuilderOption.h
index f29cbe7..8d0a7e6 100644
--- a/llvm/include/llvm/Target/CGPassBuilderOption.h
+++ b/llvm/include/llvm/Target/CGPassBuilderOption.h
@@ -52,6 +52,8 @@ struct CGPassBuilderOption {
bool EnableMachineFunctionSplitter = false;
bool EnableSinkAndFold = false;
bool EnableTailMerge = true;
+ /// Enable LoopTermFold immediately after LSR.
+ bool EnableLoopTermFold = false;
bool MISchedPostRA = false;
bool EarlyLiveIntervals = false;
bool GCEmptyBlocks = false;
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index fc81ab7..b619de3 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -194,6 +194,7 @@ def IsExact : MIFlagEnum<"IsExact">;
def NoSWrap : MIFlagEnum<"NoSWrap">;
def NoUWrap : MIFlagEnum<"NoUWrap">;
def NonNeg : MIFlagEnum<"NonNeg">;
+def InBounds : MIFlagEnum<"InBounds">;
def MIFlags;
// def not; -> Already defined as a SDNode
diff --git a/llvm/include/llvm/TextAPI/SymbolSet.h b/llvm/include/llvm/TextAPI/SymbolSet.h
index a04cb35..42c411a 100644
--- a/llvm/include/llvm/TextAPI/SymbolSet.h
+++ b/llvm/include/llvm/TextAPI/SymbolSet.h
@@ -92,7 +92,7 @@ private:
public:
SymbolSet() = default;
- ~SymbolSet();
+ LLVM_ABI ~SymbolSet();
LLVM_ABI Symbol *addGlobal(EncodeKind Kind, StringRef Name, SymbolFlags Flags,
const Target &Targ);
size_t size() const { return Symbols.size(); }
diff --git a/llvm/include/llvm/Transforms/Scalar/Reassociate.h b/llvm/include/llvm/Transforms/Scalar/Reassociate.h
index a5d13766..749f6ee 100644
--- a/llvm/include/llvm/Transforms/Scalar/Reassociate.h
+++ b/llvm/include/llvm/Transforms/Scalar/Reassociate.h
@@ -28,6 +28,7 @@
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Compiler.h"
#include <deque>
namespace llvm {
@@ -96,7 +97,7 @@ protected:
bool MadeChange;
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
private:
void BuildRankMap(Function &F, ReversePostOrderTraversal<Function *> &RPOT);
diff --git a/llvm/include/llvm/Transforms/Utils/Mem2Reg.h b/llvm/include/llvm/Transforms/Utils/Mem2Reg.h
index 76c1c2c..d0006bf 100644
--- a/llvm/include/llvm/Transforms/Utils/Mem2Reg.h
+++ b/llvm/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -15,6 +15,7 @@
#define LLVM_TRANSFORMS_UTILS_MEM2REG_H
#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -22,7 +23,7 @@ class Function;
class PromotePass : public PassInfoMixin<PromotePass> {
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Utils/ProfileVerify.h b/llvm/include/llvm/Transforms/Utils/ProfileVerify.h
index 7834305..5c9c44c 100644
--- a/llvm/include/llvm/Transforms/Utils/ProfileVerify.h
+++ b/llvm/include/llvm/Transforms/Utils/ProfileVerify.h
@@ -15,13 +15,14 @@
#include "llvm/IR/Analysis.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
/// Inject MD_prof metadata where it's missing. Used for testing that passes
/// don't accidentally drop this metadata.
class ProfileInjectorPass : public PassInfoMixin<ProfileInjectorPass> {
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
/// Checks that MD_prof is present on every instruction that supports it. Used
@@ -29,7 +30,7 @@ public:
/// valid (i.e. !{!"unknown"})
class ProfileVerifierPass : public PassInfoMixin<ProfileVerifierPass> {
public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+ LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // namespace llvm
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index cfde787..16dd6f8 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -175,6 +175,7 @@ add_llvm_component_library(LLVMAnalysis
LINK_COMPONENTS
BinaryFormat
Core
+ FrontendHLSL
Object
ProfileData
Support
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 759c553..2d52f34 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1373,7 +1373,7 @@ Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
if (ConstantFP *CFP = dyn_cast<ConstantFP>(Operand))
return flushDenormalConstantFP(CFP, Inst, IsOutput);
- if (isa<ConstantAggregateZero, UndefValue, ConstantExpr>(Operand))
+ if (isa<ConstantAggregateZero, UndefValue>(Operand))
return Operand;
Type *Ty = Operand->getType();
@@ -1389,6 +1389,9 @@ Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
Ty = VecTy->getElementType();
}
+ if (isa<ConstantExpr>(Operand))
+ return Operand;
+
if (const auto *CV = dyn_cast<ConstantVector>(Operand)) {
SmallVector<Constant *, 16> NewElts;
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
@@ -2628,14 +2631,14 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
case Intrinsic::nvvm_ceil_d:
return ConstantFoldFP(
ceil, APF, Ty,
- nvvm::GetNVVMDenromMode(
+ nvvm::GetNVVMDenormMode(
nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
case Intrinsic::nvvm_fabs_ftz:
case Intrinsic::nvvm_fabs:
return ConstantFoldFP(
fabs, APF, Ty,
- nvvm::GetNVVMDenromMode(
+ nvvm::GetNVVMDenormMode(
nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
case Intrinsic::nvvm_floor_ftz_f:
@@ -2643,7 +2646,7 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
case Intrinsic::nvvm_floor_d:
return ConstantFoldFP(
floor, APF, Ty,
- nvvm::GetNVVMDenromMode(
+ nvvm::GetNVVMDenormMode(
nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
case Intrinsic::nvvm_rcp_rm_ftz_f:
@@ -2705,7 +2708,7 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
return nullptr;
return ConstantFoldFP(
sqrt, APF, Ty,
- nvvm::GetNVVMDenromMode(
+ nvvm::GetNVVMDenormMode(
nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
// AMDGCN Intrinsics:
diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp
index 1959ab6..629fa7cd 100644
--- a/llvm/lib/Analysis/DXILResource.cpp
+++ b/llvm/lib/Analysis/DXILResource.cpp
@@ -995,18 +995,7 @@ SmallVector<dxil::ResourceInfo *> DXILResourceMap::findByUse(const Value *Key) {
//===----------------------------------------------------------------------===//
void DXILResourceBindingInfo::populate(Module &M, DXILResourceTypeMap &DRTM) {
- struct Binding {
- ResourceClass RC;
- uint32_t Space;
- uint32_t LowerBound;
- uint32_t UpperBound;
- Value *Name;
- Binding(ResourceClass RC, uint32_t Space, uint32_t LowerBound,
- uint32_t UpperBound, Value *Name)
- : RC(RC), Space(Space), LowerBound(LowerBound), UpperBound(UpperBound),
- Name(Name) {}
- };
- SmallVector<Binding> Bindings;
+ hlsl::BindingInfoBuilder Builder;
// collect all of the llvm.dx.resource.handlefrombinding calls;
// make a note if there is llvm.dx.resource.handlefromimplicitbinding
@@ -1036,133 +1025,20 @@ void DXILResourceBindingInfo::populate(Module &M, DXILResourceTypeMap &DRTM) {
assert((Size < 0 || (unsigned)LowerBound + Size - 1 <= UINT32_MAX) &&
"upper bound register overflow");
uint32_t UpperBound = Size < 0 ? UINT32_MAX : LowerBound + Size - 1;
- Bindings.emplace_back(RTI.getResourceClass(), Space, LowerBound,
- UpperBound, Name);
+ Builder.trackBinding(RTI.getResourceClass(), Space, LowerBound,
+ UpperBound, Name);
}
break;
}
case Intrinsic::dx_resource_handlefromimplicitbinding: {
- ImplicitBinding = true;
+ HasImplicitBinding = true;
break;
}
}
}
- // sort all the collected bindings
- llvm::stable_sort(Bindings, [](auto &LHS, auto &RHS) {
- return std::tie(LHS.RC, LHS.Space, LHS.LowerBound) <
- std::tie(RHS.RC, RHS.Space, RHS.LowerBound);
- });
-
- // remove duplicates
- Binding *NewEnd = llvm::unique(Bindings, [](auto &LHS, auto &RHS) {
- return std::tie(LHS.RC, LHS.Space, LHS.LowerBound, LHS.UpperBound,
- LHS.Name) == std::tie(RHS.RC, RHS.Space, RHS.LowerBound,
- RHS.UpperBound, RHS.Name);
- });
- if (NewEnd != Bindings.end())
- Bindings.erase(NewEnd);
-
- // Go over the sorted bindings and build up lists of free register ranges
- // for each binding type and used spaces. Bindings are sorted by resource
- // class, space, and lower bound register slot.
- BindingSpaces *BS = &SRVSpaces;
- for (const Binding &B : Bindings) {
- if (BS->RC != B.RC)
- // move to the next resource class spaces
- BS = &getBindingSpaces(B.RC);
-
- RegisterSpace *S = BS->Spaces.empty() ? &BS->Spaces.emplace_back(B.Space)
- : &BS->Spaces.back();
- assert(S->Space <= B.Space && "bindings not sorted correctly?");
- if (B.Space != S->Space)
- // add new space
- S = &BS->Spaces.emplace_back(B.Space);
-
- // The space is full - there are no free slots left, or the rest of the
- // slots are taken by an unbounded array. Set flag to report overlapping
- // binding later.
- if (S->FreeRanges.empty() || S->FreeRanges.back().UpperBound < UINT32_MAX) {
- OverlappingBinding = true;
- continue;
- }
-
- // adjust the last free range lower bound, split it in two, or remove it
- BindingRange &LastFreeRange = S->FreeRanges.back();
- if (LastFreeRange.LowerBound == B.LowerBound) {
- if (B.UpperBound < UINT32_MAX)
- LastFreeRange.LowerBound = B.UpperBound + 1;
- else
- S->FreeRanges.pop_back();
- } else if (LastFreeRange.LowerBound < B.LowerBound) {
- LastFreeRange.UpperBound = B.LowerBound - 1;
- if (B.UpperBound < UINT32_MAX)
- S->FreeRanges.emplace_back(B.UpperBound + 1, UINT32_MAX);
- } else {
- OverlappingBinding = true;
- if (B.UpperBound < UINT32_MAX)
- LastFreeRange.LowerBound =
- std::max(LastFreeRange.LowerBound, B.UpperBound + 1);
- else
- S->FreeRanges.pop_back();
- }
- }
-}
-
-// returns std::nulopt if binding could not be found in given space
-std::optional<uint32_t>
-DXILResourceBindingInfo::findAvailableBinding(dxil::ResourceClass RC,
- uint32_t Space, int32_t Size) {
- BindingSpaces &BS = getBindingSpaces(RC);
- RegisterSpace &RS = BS.getOrInsertSpace(Space);
- return RS.findAvailableBinding(Size);
-}
-
-DXILResourceBindingInfo::RegisterSpace &
-DXILResourceBindingInfo::BindingSpaces::getOrInsertSpace(uint32_t Space) {
- for (auto *I = Spaces.begin(); I != Spaces.end(); ++I) {
- if (I->Space == Space)
- return *I;
- if (I->Space < Space)
- continue;
- return *Spaces.insert(I, Space);
- }
- return Spaces.emplace_back(Space);
-}
-
-std::optional<uint32_t>
-DXILResourceBindingInfo::RegisterSpace::findAvailableBinding(int32_t Size) {
- assert((Size == -1 || Size > 0) && "invalid size");
-
- if (FreeRanges.empty())
- return std::nullopt;
-
- // unbounded array
- if (Size == -1) {
- BindingRange &Last = FreeRanges.back();
- if (Last.UpperBound != UINT32_MAX)
- // this space is already occupied by an unbounded array
- return std::nullopt;
- uint32_t RegSlot = Last.LowerBound;
- FreeRanges.pop_back();
- return RegSlot;
- }
-
- // single resource or fixed-size array
- for (BindingRange &R : FreeRanges) {
- // compare the size as uint64_t to prevent overflow for range (0,
- // UINT32_MAX)
- if ((uint64_t)R.UpperBound - R.LowerBound + 1 < (uint64_t)Size)
- continue;
- uint32_t RegSlot = R.LowerBound;
- // This might create a range where (LowerBound == UpperBound + 1). When
- // that happens, the next time this function is called the range will
- // skipped over by the check above (at this point Size is always > 0).
- R.LowerBound += Size;
- return RegSlot;
- }
-
- return std::nullopt;
+ Bindings = Builder.calculateBindingInfo(
+ [this](auto, auto) { this->HasOverlappingBinding = true; });
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 0990a0d..477e477 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2682,6 +2682,20 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps, PreservedFlags);
}
}
+
+ // Try to push the constant operand into a ZExt: A + zext (-A + B) -> zext
+ // (B), if trunc (A) + -A + B does not unsigned-wrap.
+ const SCEVAddExpr *InnerAdd;
+ if (match(B, m_scev_ZExt(m_scev_Add(InnerAdd)))) {
+ const SCEV *NarrowA = getTruncateExpr(A, InnerAdd->getType());
+ if (NarrowA == getNegativeSCEV(InnerAdd->getOperand(0)) &&
+ getZeroExtendExpr(NarrowA, B->getType()) == A &&
+ hasFlags(StrengthenNoWrapFlags(this, scAddExpr, {NarrowA, InnerAdd},
+ SCEV::FlagAnyWrap),
+ SCEV::FlagNUW)) {
+ return getZeroExtendExpr(getAddExpr(NarrowA, InnerAdd), B->getType());
+ }
+ }
}
// Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 55ba52a..c7eb2ec 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1486,6 +1486,10 @@ void TargetTransformInfo::collectKernelLaunchBounds(
return TTIImpl->collectKernelLaunchBounds(F, LB);
}
+bool TargetTransformInfo::allowVectorElementIndexingUsingGEP() const {
+ return TTIImpl->allowVectorElementIndexingUsingGEP();
+}
+
TargetTransformInfoImplBase::~TargetTransformInfoImplBase() = default;
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
diff --git a/llvm/lib/Analysis/UniformityAnalysis.cpp b/llvm/lib/Analysis/UniformityAnalysis.cpp
index 15107c2..2e4063f 100644
--- a/llvm/lib/Analysis/UniformityAnalysis.cpp
+++ b/llvm/lib/Analysis/UniformityAnalysis.cpp
@@ -178,6 +178,7 @@ bool UniformityInfoWrapperPass::runOnFunction(Function &F) {
void UniformityInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
OS << "UniformityInfo for function '" << m_function->getName() << "':\n";
+ m_uniformityInfo.print(OS);
}
void UniformityInfoWrapperPass::releaseMemory() {
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 150ddce..b3b4c37 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1117,7 +1117,7 @@ Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
const InterleaveGroup<Instruction> &Group) {
// All 1's means mask is not needed.
- if (Group.getNumMembers() == Group.getFactor())
+ if (Group.isFull())
return nullptr;
// TODO: support reversed access.
@@ -1663,7 +1663,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// Case 1: A full group. Can Skip the checks; For full groups, if the wide
// load would wrap around the address space we would do a memory access at
// nullptr even without the transformation.
- if (Group->getNumMembers() == Group->getFactor())
+ if (Group->isFull())
continue;
// Case 2: If first and last members of the group don't wrap this implies
@@ -1698,7 +1698,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// Case 1: A full group. Can Skip the checks; For full groups, if the wide
// store would wrap around the address space we would do a memory access at
// nullptr even without the transformation.
- if (Group->getNumMembers() == Group->getFactor())
+ if (Group->isFull())
continue;
// Interleave-store-group with gaps is implemented using masked wide store.
diff --git a/llvm/lib/BinaryFormat/SFrame.cpp b/llvm/lib/BinaryFormat/SFrame.cpp
index 3b436af..f1765d7 100644
--- a/llvm/lib/BinaryFormat/SFrame.cpp
+++ b/llvm/lib/BinaryFormat/SFrame.cpp
@@ -35,3 +35,36 @@ ArrayRef<EnumEntry<sframe::ABI>> sframe::getABIs() {
};
return ArrayRef(ABIs);
}
+
+ArrayRef<EnumEntry<sframe::FREType>> sframe::getFRETypes() {
+ static constexpr EnumEntry<sframe::FREType> FRETypes[] = {
+#define HANDLE_SFRAME_FRE_TYPE(CODE, NAME) {#NAME, sframe::FREType::NAME},
+#include "llvm/BinaryFormat/SFrameConstants.def"
+ };
+ return ArrayRef(FRETypes);
+}
+
+ArrayRef<EnumEntry<sframe::FDEType>> sframe::getFDETypes() {
+ static constexpr EnumEntry<sframe::FDEType> FDETypes[] = {
+#define HANDLE_SFRAME_FDE_TYPE(CODE, NAME) {#NAME, sframe::FDEType::NAME},
+#include "llvm/BinaryFormat/SFrameConstants.def"
+ };
+ return ArrayRef(FDETypes);
+}
+
+ArrayRef<EnumEntry<sframe::AArch64PAuthKey>> sframe::getAArch64PAuthKeys() {
+ static constexpr EnumEntry<sframe::AArch64PAuthKey> AArch64PAuthKeys[] = {
+#define HANDLE_SFRAME_AARCH64_PAUTH_KEY(CODE, NAME) \
+ {#NAME, sframe::AArch64PAuthKey::NAME},
+#include "llvm/BinaryFormat/SFrameConstants.def"
+ };
+ return ArrayRef(AArch64PAuthKeys);
+}
+
+ArrayRef<EnumEntry<sframe::FREOffset>> sframe::getFREOffsets() {
+ static constexpr EnumEntry<sframe::FREOffset> FREOffsets[] = {
+#define HANDLE_SFRAME_FRE_OFFSET(CODE, NAME) {#NAME, sframe::FREOffset::NAME},
+#include "llvm/BinaryFormat/SFrameConstants.def"
+ };
+ return ArrayRef(FREOffsets);
+}
diff --git a/llvm/lib/CGData/StableFunctionMapRecord.cpp b/llvm/lib/CGData/StableFunctionMapRecord.cpp
index 4e4fcef..423e068 100644
--- a/llvm/lib/CGData/StableFunctionMapRecord.cpp
+++ b/llvm/lib/CGData/StableFunctionMapRecord.cpp
@@ -160,14 +160,18 @@ void StableFunctionMapRecord::deserialize(const unsigned char *&Ptr,
for (unsigned I = 0; I < NumFuncs; ++I) {
auto Hash =
endian::readNext<stable_hash, endianness::little, unaligned>(Ptr);
- auto FunctionNameId =
+ [[maybe_unused]] auto FunctionNameId =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
- assert(FunctionMap->getNameForId(FunctionNameId) &&
- "FunctionNameId out of range");
- auto ModuleNameId =
+ [[maybe_unused]] auto ModuleNameId =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
- assert(FunctionMap->getNameForId(ModuleNameId) &&
- "ModuleNameId out of range");
+ // Only validate IDs if we've read the names
+ if (ReadStableFunctionMapNames) {
+ assert(FunctionMap->getNameForId(FunctionNameId) &&
+ "FunctionNameId out of range");
+ assert(FunctionMap->getNameForId(ModuleNameId) &&
+ "ModuleNameId out of range");
+ }
+
auto InstCount =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 6166271..1641c3e 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1654,6 +1654,88 @@ void AsmPrinter::emitStackUsage(const MachineFunction &MF) {
*StackUsageStream << "static\n";
}
+/// Extracts a generalized numeric type identifier of a Function's type from
+/// type metadata. Returns null if metadata cannot be found.
+static ConstantInt *extractNumericCGTypeId(const Function &F) {
+ SmallVector<MDNode *, 2> Types;
+ F.getMetadata(LLVMContext::MD_type, Types);
+ for (const auto &Type : Types) {
+ if (Type->hasGeneralizedMDString()) {
+ MDString *MDGeneralizedTypeId = cast<MDString>(Type->getOperand(1));
+ uint64_t TypeIdVal = llvm::MD5Hash(MDGeneralizedTypeId->getString());
+ IntegerType *Int64Ty = Type::getInt64Ty(F.getContext());
+ return ConstantInt::get(Int64Ty, TypeIdVal);
+ }
+ }
+ return nullptr;
+}
+
+/// Emits .callgraph section.
+void AsmPrinter::emitCallGraphSection(const MachineFunction &MF,
+ FunctionInfo &FuncInfo) {
+ if (!MF.getTarget().Options.EmitCallGraphSection)
+ return;
+
+ // Switch to the call graph section for the function
+ MCSection *FuncCGSection =
+ getObjFileLowering().getCallGraphSection(*getCurrentSection());
+ assert(FuncCGSection && "null callgraph section");
+ OutStreamer->pushSection();
+ OutStreamer->switchSection(FuncCGSection);
+
+ // Emit format version number.
+ OutStreamer->emitInt64(CallGraphSectionFormatVersion::V_0);
+
+ // Emit function's self information, which is composed of:
+ // 1) FunctionEntryPc
+ // 2) FunctionKind: Whether the function is indirect target, and if so,
+ // whether its type id is known.
+ // 3) FunctionTypeId: Emit only when the function is an indirect target
+ // and its type id is known.
+
+ // Emit function entry pc.
+ const MCSymbol *FunctionSymbol = getFunctionBegin();
+ OutStreamer->emitSymbolValue(FunctionSymbol, TM.getProgramPointerSize());
+
+ // If this function has external linkage or has its address taken and
+ // it is not a callback, then anything could call it.
+ const Function &F = MF.getFunction();
+ bool IsIndirectTarget =
+ !F.hasLocalLinkage() || F.hasAddressTaken(nullptr,
+ /*IgnoreCallbackUses=*/true,
+ /*IgnoreAssumeLikeCalls=*/true,
+ /*IgnoreLLVMUsed=*/false);
+
+  // FIXME: FunctionKind takes only a few values but is emitted as a 64-bit
+  // value. It could be optimized to occupy 2 bits instead.
+ // Emit function kind, and type id if available.
+ if (!IsIndirectTarget) {
+ OutStreamer->emitInt64(
+ static_cast<uint64_t>(FunctionInfo::FunctionKind::NOT_INDIRECT_TARGET));
+ } else {
+ if (const auto *TypeId = extractNumericCGTypeId(F)) {
+ OutStreamer->emitInt64(static_cast<uint64_t>(
+ FunctionInfo::FunctionKind::INDIRECT_TARGET_KNOWN_TID));
+ OutStreamer->emitInt64(TypeId->getZExtValue());
+ } else {
+ OutStreamer->emitInt64(static_cast<uint64_t>(
+ FunctionInfo::FunctionKind::INDIRECT_TARGET_UNKNOWN_TID));
+ }
+ }
+
+ // Emit callsite labels, where each element is a pair of type id and
+ // indirect callsite pc.
+ const auto &CallSiteLabels = FuncInfo.CallSiteLabels;
+ OutStreamer->emitInt64(CallSiteLabels.size());
+ for (const auto &[TypeId, Label] : CallSiteLabels) {
+ OutStreamer->emitInt64(TypeId);
+ OutStreamer->emitSymbolValue(Label, TM.getProgramPointerSize());
+ }
+ FuncInfo.CallSiteLabels.clear();
+
+ OutStreamer->popSection();
+}
+
void AsmPrinter::emitPCSectionsLabel(const MachineFunction &MF,
const MDNode &MD) {
MCSymbol *S = MF.getContext().createTempSymbol("pcsection");
@@ -1784,6 +1866,23 @@ static StringRef getMIMnemonic(const MachineInstr &MI, MCStreamer &Streamer) {
return Name;
}
+void AsmPrinter::emitIndirectCalleeLabels(
+ FunctionInfo &FuncInfo,
+ const MachineFunction::CallSiteInfoMap &CallSitesInfoMap,
+ const MachineInstr &MI) {
+ // Only indirect calls have type identifiers set.
+ const auto &CallSiteInfo = CallSitesInfoMap.find(&MI);
+ if (CallSiteInfo == CallSitesInfoMap.end())
+ return;
+
+ for (ConstantInt *CalleeTypeId : CallSiteInfo->second.CalleeTypeIds) {
+ MCSymbol *S = MF->getContext().createTempSymbol();
+ OutStreamer->emitLabel(S);
+ uint64_t CalleeTypeIdVal = CalleeTypeId->getZExtValue();
+ FuncInfo.CallSiteLabels.emplace_back(CalleeTypeIdVal, S);
+ }
+}
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void AsmPrinter::emitFunctionBody() {
@@ -1830,6 +1929,8 @@ void AsmPrinter::emitFunctionBody() {
MBBSectionRanges[MF->front().getSectionID()] =
MBBSectionRange{CurrentFnBegin, nullptr};
+ FunctionInfo FuncInfo;
+ const auto &CallSitesInfoMap = MF->getCallSitesInfo();
for (auto &MBB : *MF) {
// Print a label for the basic block.
emitBasicBlockStart(MBB);
@@ -1963,6 +2064,9 @@ void AsmPrinter::emitFunctionBody() {
break;
}
+ if (TM.Options.EmitCallGraphSection && MI.isCall())
+ emitIndirectCalleeLabels(FuncInfo, CallSitesInfoMap, MI);
+
// If there is a post-instruction symbol, emit a label for it here.
if (MCSymbol *S = MI.getPostInstrSymbol())
OutStreamer->emitLabel(S);
@@ -2142,6 +2246,9 @@ void AsmPrinter::emitFunctionBody() {
// Emit section containing stack size metadata.
emitStackSizeSection(*MF);
+ // Emit section containing call graph metadata.
+ emitCallGraphSection(*MF, FuncInfo);
+
// Emit .su file containing function stack size information.
emitStackUsage(*MF);
@@ -2841,6 +2948,7 @@ void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
F.hasFnAttribute("xray-instruction-threshold") ||
needFuncLabels(MF, *this) || NeedsLocalForSize ||
MF.getTarget().Options.EmitStackSizeSection ||
+ MF.getTarget().Options.EmitCallGraphSection ||
MF.getTarget().Options.BBAddrMap) {
CurrentFnBegin = createTempSymbol("func_begin");
if (NeedsLocalForSize)
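For reference, the per-function record that emitCallGraphSection writes out is: format version, function entry pc, function kind, an optional type id (only for indirect targets with a known type id), the number of indirect call sites, and then (type id, call-site pc) pairs. A small standalone C++ sketch of that layout, assuming a 64-bit little-endian target; the struct and helper names below are invented for illustration and are not part of the patch:

#include <cstdint>
#include <utility>
#include <vector>

struct CallGraphRecord {
  uint64_t FormatVersion;   // CallGraphSectionFormatVersion::V_0
  uint64_t FunctionEntryPc; // value of the function-begin symbol
  uint64_t FunctionKind;    // NOT_INDIRECT_TARGET / *_KNOWN_TID / *_UNKNOWN_TID
  bool HasTypeId;           // present only for INDIRECT_TARGET_KNOWN_TID
  uint64_t FunctionTypeId;
  std::vector<std::pair<uint64_t, uint64_t>> CallSites; // (type id, call-site pc)
};

static void appendU64(std::vector<uint8_t> &Out, uint64_t V) {
  for (int I = 0; I < 8; ++I)
    Out.push_back(uint8_t(V >> (8 * I))); // little-endian byte order
}

std::vector<uint8_t> encode(const CallGraphRecord &R) {
  std::vector<uint8_t> Out;
  appendU64(Out, R.FormatVersion);
  appendU64(Out, R.FunctionEntryPc);
  appendU64(Out, R.FunctionKind);
  if (R.HasTypeId)
    appendU64(Out, R.FunctionTypeId);
  appendU64(Out, R.CallSites.size());
  for (const auto &[TypeId, Pc] : R.CallSites) {
    appendU64(Out, TypeId);
    appendU64(Out, Pc);
  }
  return Out;
}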
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 416c56d..f16283b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2769,6 +2769,29 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
return optimizeGatherScatterInst(II, II->getArgOperand(0));
case Intrinsic::masked_scatter:
return optimizeGatherScatterInst(II, II->getArgOperand(1));
+ case Intrinsic::masked_load:
+ // Treat v1X masked load as load X type.
+ if (auto *VT = dyn_cast<FixedVectorType>(II->getType())) {
+ if (VT->getNumElements() == 1) {
+ Value *PtrVal = II->getArgOperand(0);
+ unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+ if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
+ return true;
+ }
+ }
+ return false;
+ case Intrinsic::masked_store:
+ // Treat v1X masked store as store X type.
+ if (auto *VT =
+ dyn_cast<FixedVectorType>(II->getArgOperand(0)->getType())) {
+ if (VT->getNumElements() == 1) {
+ Value *PtrVal = II->getArgOperand(1);
+ unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+ if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
+ return true;
+ }
+ }
+ return false;
}
SmallVector<Value *, 2> PtrOps;
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 012d873..9ba1782 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -1009,7 +1009,8 @@ void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
for (unsigned I = 0; I < NumValues; ++I) {
Register Addr;
- MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
+ MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
+ Offsets[I]);
auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
MRI.getType(VRegs[I]),
commonAlignment(BaseAlign, Offsets[I]));
@@ -1039,7 +1040,8 @@ void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
for (unsigned I = 0; I < NumValues; ++I) {
Register Addr;
- MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
+ MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
+ Offsets[I]);
auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
MRI.getType(VRegs[I]),
commonAlignment(BaseAlign, Offsets[I]));
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index dc5dfab..fd38c30 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1409,7 +1409,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
for (unsigned i = 0; i < Regs.size(); ++i) {
Register Addr;
- MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
+ MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
Align BaseAlign = getMemOpAlign(LI);
@@ -1448,7 +1448,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
- MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
+ MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
Align BaseAlign = getMemOpAlign(SI);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 538a763..d9d3569 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4170,7 +4170,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
auto OffsetCst = MIRBuilder.buildConstant(LLT::scalar(PtrTy.getSizeInBits()),
LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
- auto SmallPtr = MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst);
+ auto SmallPtr = MIRBuilder.buildObjectPtrOffset(PtrAddReg, PtrReg, OffsetCst);
auto SmallLoad = MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), AnyExtTy,
SmallPtr, *SmallMMO);
@@ -4277,8 +4277,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerStore(GStore &StoreMI) {
LLT PtrTy = MRI.getType(PtrReg);
auto OffsetCst = MIRBuilder.buildConstant(
LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
- auto SmallPtr =
- MIRBuilder.buildPtrAdd(PtrTy, PtrReg, OffsetCst);
+ auto SmallPtr = MIRBuilder.buildObjectPtrOffset(PtrTy, PtrReg, OffsetCst);
MachineMemOperand *LargeMMO =
MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
@@ -5349,7 +5348,8 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
unsigned ByteOffset = Offset / 8;
Register NewAddrReg;
- MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
+ MIRBuilder.materializeObjectPtrOffset(NewAddrReg, AddrReg, OffsetTy,
+ ByteOffset);
MachineMemOperand *NewMMO =
MF.getMachineMemOperand(&MMO, ByteOffset, PartTy);
@@ -9822,7 +9822,7 @@ LegalizerHelper::lowerMemset(MachineInstr &MI, Register Dst, Register Val,
if (DstOff != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
- Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
+ Ptr = MIB.buildObjectPtrOffset(PtrTy, Dst, Offset).getReg(0);
}
MIB.buildStore(Value, Ptr, *StoreMMO);
@@ -9962,7 +9962,7 @@ LegalizerHelper::lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
LLT SrcTy = MRI.getType(Src);
Offset = MIB.buildConstant(LLT::scalar(SrcTy.getSizeInBits()), CurrOffset)
.getReg(0);
- LoadPtr = MIB.buildPtrAdd(SrcTy, Src, Offset).getReg(0);
+ LoadPtr = MIB.buildObjectPtrOffset(SrcTy, Src, Offset).getReg(0);
}
auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
@@ -9970,7 +9970,7 @@ LegalizerHelper::lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
Register StorePtr = Dst;
if (CurrOffset != 0) {
LLT DstTy = MRI.getType(Dst);
- StorePtr = MIB.buildPtrAdd(DstTy, Dst, Offset).getReg(0);
+ StorePtr = MIB.buildObjectPtrOffset(DstTy, Dst, Offset).getReg(0);
}
MIB.buildStore(LdVal, StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();
@@ -10060,7 +10060,7 @@ LegalizerHelper::lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
LLT SrcTy = MRI.getType(Src);
auto Offset =
MIB.buildConstant(LLT::scalar(SrcTy.getSizeInBits()), CurrOffset);
- LoadPtr = MIB.buildPtrAdd(SrcTy, Src, Offset).getReg(0);
+ LoadPtr = MIB.buildObjectPtrOffset(SrcTy, Src, Offset).getReg(0);
}
LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
CurrOffset += CopyTy.getSizeInBytes();
@@ -10078,7 +10078,7 @@ LegalizerHelper::lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
LLT DstTy = MRI.getType(Dst);
auto Offset =
MIB.buildConstant(LLT::scalar(DstTy.getSizeInBits()), CurrOffset);
- StorePtr = MIB.buildPtrAdd(DstTy, Dst, Offset).getReg(0);
+ StorePtr = MIB.buildObjectPtrOffset(DstTy, Dst, Offset).getReg(0);
}
MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 121d7e8..27df7e3 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -208,11 +208,20 @@ MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
+MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
+ const SrcOp &Op0,
+ const SrcOp &Op1) {
+ return buildPtrAdd(Res, Op0, Op1,
+ MachineInstr::MIFlag::NoUWrap |
+ MachineInstr::MIFlag::InBounds);
+}
+
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
- const LLT ValueTy, uint64_t Value) {
+ const LLT ValueTy, uint64_t Value,
+ std::optional<unsigned> Flags) {
assert(Res == 0 && "Res is a result argument");
- assert(ValueTy.isScalar() && "invalid offset type");
+ assert(ValueTy.isScalar() && "invalid offset type");
if (Value == 0) {
Res = Op0;
@@ -221,7 +230,14 @@ MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
auto Cst = buildConstant(ValueTy, Value);
- return buildPtrAdd(Res, Op0, Cst.getReg(0));
+ return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
+}
+
+std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
+ Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
+ return materializePtrAdd(Res, Op0, ValueTy, Value,
+ MachineInstr::MIFlag::NoUWrap |
+ MachineInstr::MIFlag::InBounds);
}
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index 193df1f..8b72c29 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -217,6 +217,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
.Case("nneg", MIToken::kw_nneg)
.Case("disjoint", MIToken::kw_disjoint)
.Case("samesign", MIToken::kw_samesign)
+ .Case("inbounds", MIToken::kw_inbounds)
.Case("nofpexcept", MIToken::kw_nofpexcept)
.Case("unpredictable", MIToken::kw_unpredictable)
.Case("debug-location", MIToken::kw_debug_location)
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h
index 54142ac..0627f17 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.h
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.h
@@ -78,6 +78,7 @@ struct MIToken {
kw_nneg,
kw_disjoint,
kw_samesign,
+ kw_inbounds,
kw_debug_location,
kw_debug_instr_number,
kw_dbg_instr_ref,
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 807d59c..6a464d9 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1477,7 +1477,8 @@ bool MIParser::parseInstruction(unsigned &OpCode, unsigned &Flags) {
Token.is(MIToken::kw_nneg) ||
Token.is(MIToken::kw_disjoint) ||
Token.is(MIToken::kw_nusw) ||
- Token.is(MIToken::kw_samesign)) {
+ Token.is(MIToken::kw_samesign) ||
+ Token.is(MIToken::kw_inbounds)) {
// clang-format on
// Mine frame and fast math flags
if (Token.is(MIToken::kw_frame_setup))
@@ -1518,6 +1519,8 @@ bool MIParser::parseInstruction(unsigned &OpCode, unsigned &Flags) {
Flags |= MachineInstr::NoUSWrap;
if (Token.is(MIToken::kw_samesign))
Flags |= MachineInstr::SameSign;
+ if (Token.is(MIToken::kw_inbounds))
+ Flags |= MachineInstr::InBounds;
lex();
}
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index ad7835a..ce1834a 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -820,6 +820,8 @@ static void printMI(raw_ostream &OS, MFPrintState &State,
OS << "nusw ";
if (MI.getFlag(MachineInstr::SameSign))
OS << "samesign ";
+ if (MI.getFlag(MachineInstr::InBounds))
+ OS << "inbounds ";
// NOTE: Please add new MIFlags also to the MI_FLAGS_STR in
// llvm/utils/update_mir_test_checks.py.
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 60d42e0..ec40f6a 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -698,6 +698,26 @@ bool MachineFunction::needsFrameMoves() const {
!F.getParent()->debug_compile_units().empty();
}
+MachineFunction::CallSiteInfo::CallSiteInfo(const CallBase &CB) {
+ // Numeric callee_type ids are only for indirect calls.
+ if (!CB.isIndirectCall())
+ return;
+
+ MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
+ if (!CalleeTypeList)
+ return;
+
+ for (const MDOperand &Op : CalleeTypeList->operands()) {
+ MDNode *TypeMD = cast<MDNode>(Op);
+ MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
+ // Compute numeric type id from generalized type id string
+ uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
+ IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
+ CalleeTypeIds.push_back(
+ ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
+ }
+}
+
namespace llvm {
template<>
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index da3665b..79047f7 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -585,6 +585,8 @@ uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
MIFlags |= MachineInstr::MIFlag::NoUSWrap;
if (GEP->hasNoUnsignedWrap())
MIFlags |= MachineInstr::MIFlag::NoUWrap;
+ if (GEP->isInBounds())
+ MIFlags |= MachineInstr::MIFlag::InBounds;
}
// Copy the nonneg flag.
@@ -1860,8 +1862,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "nneg ";
if (getFlag(MachineInstr::Disjoint))
OS << "disjoint ";
+ if (getFlag(MachineInstr::NoUSWrap))
+ OS << "nusw ";
if (getFlag(MachineInstr::SameSign))
OS << "samesign ";
+ if (getFlag(MachineInstr::InBounds))
+ OS << "inbounds ";
// Print the opcode name.
if (TII)
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 0f742c4..21bf052 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -423,7 +423,7 @@ void ModuloScheduleExpander::generateExistingPhis(
// potentially define two values.
unsigned MaxPhis = PrologStage + 2;
if (!InKernel && (int)PrologStage <= LoopValStage)
- MaxPhis = std::max((int)MaxPhis - (int)LoopValStage, 1);
+ MaxPhis = std::max((int)MaxPhis - LoopValStage, 1);
unsigned NumPhis = std::min(NumStages, MaxPhis);
Register NewReg;
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 2d7987a..7ede564 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -306,7 +306,12 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate {
/// number if it is not zero. If DstReg is a physical register and the
/// existing subregister number of the def / use being updated is not zero,
/// make sure to set it to the correct physical subregister.
- void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx);
+ ///
+ /// If \p SubregToRegSrcInst is not empty, we are coalescing a
+ /// `DstReg = SUBREG_TO_REG SrcReg`, which should introduce an
+ /// implicit-def of DstReg on instructions that define SrcReg.
+ void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx,
+ ArrayRef<MachineInstr *> SubregToRegSrcInst = {});
/// If the given machine operand reads only undefined lanes add an undef
/// flag.
@@ -1443,6 +1448,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// CopyMI may have implicit operands, save them so that we can transfer them
// over to the newly materialized instruction after CopyMI is removed.
+ LaneBitmask NewMIImplicitOpsMask;
SmallVector<MachineOperand, 4> ImplicitOps;
ImplicitOps.reserve(CopyMI->getNumOperands() -
CopyMI->getDesc().getNumOperands());
@@ -1457,6 +1463,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
(MO.getSubReg() == 0 && MO.getReg() == DstOperand.getReg())) &&
"unexpected implicit virtual register def");
ImplicitOps.push_back(MO);
+ if (MO.isDef() && MO.getReg().isVirtual() &&
+ MRI->shouldTrackSubRegLiveness(DstReg))
+ NewMIImplicitOpsMask |= MRI->getMaxLaneMaskForVReg(MO.getReg());
}
}
@@ -1499,14 +1508,11 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
} else {
assert(MO.getReg() == NewMI.getOperand(0).getReg());
- // We're only expecting another def of the main output, so the range
- // should get updated with the regular output range.
- //
- // FIXME: The range updating below probably needs updating to look at
- // the super register if subranges are tracked.
- assert(!MRI->shouldTrackSubRegLiveness(DstReg) &&
- "subrange update for implicit-def of super register may not be "
- "properly handled");
+      // If lanemasks need to be tracked, compute the lanemask of the NewMI
+      // implicit def operands so that subranges for the super-regs are not
+      // removed by code later on in this function.
+ if (MRI->shouldTrackSubRegLiveness(MO.getReg()))
+ NewMIImplicitOpsMask |= MRI->getMaxLaneMaskForVReg(MO.getReg());
}
}
}
@@ -1606,7 +1612,8 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber());
VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator();
for (LiveInterval::SubRange &SR : DstInt.subranges()) {
- if ((SR.LaneMask & DstMask).none()) {
+ if ((SR.LaneMask & DstMask).none() &&
+ (SR.LaneMask & NewMIImplicitOpsMask).none()) {
LLVM_DEBUG(dbgs()
<< "Removing undefined SubRange "
<< PrintLaneMask(SR.LaneMask) << " : " << SR << "\n");
@@ -1870,11 +1877,14 @@ void RegisterCoalescer::addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
}
}
-void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
- unsigned SubIdx) {
+void RegisterCoalescer::updateRegDefsUses(
+ Register SrcReg, Register DstReg, unsigned SubIdx,
+ ArrayRef<MachineInstr *> SubregToRegSrcInsts) {
bool DstIsPhys = DstReg.isPhysical();
LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
+ // Coalescing a COPY may expose reads of 'undef' subregisters.
+ // If so, then explicitly propagate 'undef' to those operands.
if (DstInt && DstInt->hasSubRanges() && DstReg != SrcReg) {
for (MachineOperand &MO : MRI->reg_operands(DstReg)) {
if (MO.isUndef())
@@ -1891,6 +1901,15 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
}
}
+ // If DstInt already has a subrange for the unused lanes, then we shouldn't
+ // create duplicate subranges when we update the interval for unused lanes.
+ LaneBitmask DstIntLaneMask;
+ if (DstInt && MRI->shouldTrackSubRegLiveness(DstReg)) {
+ for (LiveInterval::SubRange &SR : DstInt->subranges())
+ DstIntLaneMask |= SR.LaneMask;
+ }
+
+ // Go through all instructions to replace uses of 'SrcReg' by 'DstReg'.
SmallPtrSet<MachineInstr *, 8> Visited;
for (MachineRegisterInfo::reg_instr_iterator I = MRI->reg_instr_begin(SrcReg),
E = MRI->reg_instr_end();
@@ -1914,6 +1933,80 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
if (DstInt && !Reads && SubIdx && !UseMI->isDebugInstr())
Reads = DstInt->liveAt(LIS->getInstructionIndex(*UseMI));
+ bool RequiresImplicitRedef = false;
+ if (!SubregToRegSrcInsts.empty()) {
+ // We can only add an implicit-def and undef if the sub registers match,
+ // e.g.
+ // %0:gr32 = INSTX
+ // %0.sub8:gr32 = INSTY // top 24 bits of %0 still defined
+ // %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub32
+ //
+ // This cannot be transformed into:
+ // %1.sub32:gr64 = INSTX
+ // undef %1.sub8:gr64 = INSTY , implicit-def %1
+ //
+      // Because that would clobber the top 24 bits of %1.sub32.
+ if (is_contained(SubregToRegSrcInsts, UseMI) &&
+ all_of(UseMI->defs(),
+ [&SubIdx, &SrcReg](const MachineOperand &MO) -> bool {
+ if (MO.getReg() != SrcReg || !MO.getSubReg() || MO.isUndef())
+ return true;
+ return SubIdx == MO.getSubReg();
+ })) {
+ // Add implicit-def of super-register to express that the whole
+ // register is defined by the instruction.
+ MachineInstrBuilder MIB(*MF, UseMI);
+ MIB.addReg(DstReg, RegState::ImplicitDefine);
+ RequiresImplicitRedef = true;
+ }
+
+      // If the coalesced instruction doesn't fully define the register, we need
+ // to preserve the original super register liveness for SUBREG_TO_REG.
+ //
+ // We pretended SUBREG_TO_REG was a regular copy for coalescing purposes,
+ // but it introduces liveness for other subregisters. Downstream users may
+ // have been relying on those bits, so we need to ensure their liveness is
+ // captured with a def of other lanes.
+ if (DstInt && MRI->shouldTrackSubRegLiveness(DstReg)) {
+ // First check if there is sufficient granularity in terms of subranges.
+ LaneBitmask DstMask = MRI->getMaxLaneMaskForVReg(DstInt->reg());
+ LaneBitmask UsedLanes = TRI->getSubRegIndexLaneMask(SubIdx);
+ LaneBitmask UnusedLanes = DstMask & ~UsedLanes;
+ if ((UnusedLanes & ~DstIntLaneMask).any()) {
+ BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
+ DstInt->createSubRangeFrom(Allocator, UnusedLanes, *DstInt);
+ DstIntLaneMask |= UnusedLanes;
+ }
+
+ // After duplicating the live ranges for the low/hi bits, we
+ // need to update the subranges of the DstReg interval such that
+ // for a case like this:
+ //
+ // entry:
+ // 16B %1:gpr32 = INSTRUCTION (<=> UseMI)
+ // :
+ // if.then:
+ // 32B %1:gpr32 = MOVIMM32 ..
+ // 48B %0:gpr64 = SUBREG_TO_REG 0, %1, sub32
+ //
+        // Only the MOVIMM32 requires a def of the top lanes, and any
+        // intervals for the top 32 bits of the def at 16B should be removed.
+ for (LiveInterval::SubRange &SR : DstInt->subranges()) {
+ if (!Writes || RequiresImplicitRedef ||
+ (SR.LaneMask & UnusedLanes).none())
+ continue;
+
+ assert((SR.LaneMask & UnusedLanes) == SR.LaneMask &&
+ "Unexpected lanemask. Subrange needs finer granularity");
+
+ SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(false);
+ auto SegmentI = SR.find(UseIdx);
+ if (SegmentI != SR.end())
+ SR.removeSegment(SegmentI, true);
+ }
+ }
+ }
+
// Replace SrcReg with DstReg in all UseMI operands.
for (unsigned Op : Ops) {
MachineOperand &MO = UseMI->getOperand(Op);
@@ -1922,7 +2015,7 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
// turn a full def into a read-modify-write sub-register def and vice
// versa.
if (SubIdx && MO.isDef())
- MO.setIsUndef(!Reads);
+ MO.setIsUndef(!Reads || RequiresImplicitRedef);
// A subreg use of a partially undef (super) register may be a complete
// undef use now and then has to be marked that way.
@@ -2025,6 +2118,30 @@ void RegisterCoalescer::setUndefOnPrunedSubRegUses(LiveInterval &LI,
LIS->shrinkToUses(&LI);
}
+/// For a given use of value \p Idx, it returns the def in the current block,
+/// or otherwise all possible defs in preceding blocks.
+static bool FindDefInBlock(SmallPtrSetImpl<MachineBasicBlock *> &VisitedBlocks,
+ SmallVector<MachineInstr *> &Instrs,
+ LiveIntervals *LIS, LiveInterval &SrcInt,
+ MachineBasicBlock *MBB, VNInfo *Idx) {
+ if (!Idx->isPHIDef()) {
+ MachineInstr *Def = LIS->getInstructionFromIndex(Idx->def);
+ assert(Def && "Unable to find a def for SUBREG_TO_REG source operand");
+ Instrs.push_back(Def);
+ return true;
+ }
+
+ bool Any = false;
+ if (VisitedBlocks.count(MBB))
+ return false;
+ VisitedBlocks.insert(MBB);
+ for (MachineBasicBlock *Pred : MBB->predecessors()) {
+ Any |= FindDefInBlock(VisitedBlocks, Instrs, LIS, SrcInt, Pred,
+ SrcInt.getVNInfoBefore(LIS->getMBBEndIdx(Pred)));
+ }
+ return Any;
+}
+
bool RegisterCoalescer::joinCopy(
MachineInstr *CopyMI, bool &Again,
SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs) {
@@ -2156,6 +2273,35 @@ bool RegisterCoalescer::joinCopy(
});
}
+ SmallVector<MachineInstr *> SubregToRegSrcInsts;
+ if (CopyMI->isSubregToReg()) {
+ // For the case where the copy instruction is a SUBREG_TO_REG, e.g.
+ //
+ // %0:gpr32 = movimm32 ..
+ // %1:gpr64 = SUBREG_TO_REG 0, %0, sub32
+ // ...
+ // %0:gpr32 = COPY <something>
+ //
+    // After joining live ranges, the original `movimm32` will need an
+ // implicit-def to make it explicit that the entire register is written,
+ // i.e.
+ //
+ // undef %0.sub32:gpr64 = movimm32 ..., implicit-def %0
+ // ...
+ // undef %0.sub32:gpr64 = COPY <something> // Note that this does not
+ // // require an implicit-def,
+ // // because it has nothing to
+ // // do with the SUBREG_TO_REG.
+ LiveInterval &SrcInt =
+ LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ SlotIndex SubregToRegSlotIdx = LIS->getInstructionIndex(*CopyMI);
+ SmallPtrSet<MachineBasicBlock *, 8> VisitedBlocks;
+ if (!FindDefInBlock(VisitedBlocks, SubregToRegSrcInsts, LIS, SrcInt,
+ CopyMI->getParent(),
+ SrcInt.Query(SubregToRegSlotIdx).valueIn()))
+ llvm_unreachable("SUBREG_TO_REG src requires a def");
+ }
+
ShrinkMask = LaneBitmask::getNone();
ShrinkMainRange = false;
@@ -2225,9 +2371,12 @@ bool RegisterCoalescer::joinCopy(
// Rewrite all SrcReg operands to DstReg.
// Also update DstReg operands to include DstIdx if it is set.
- if (CP.getDstIdx())
+ if (CP.getDstIdx()) {
+ assert(SubregToRegSrcInsts.empty() && "can this happen?");
updateRegDefsUses(CP.getDstReg(), CP.getDstReg(), CP.getDstIdx());
- updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx());
+ }
+ updateRegDefsUses(CP.getSrcReg(), CP.getDstReg(), CP.getSrcIdx(),
+ SubregToRegSrcInsts);
// Shrink subregister ranges if necessary.
if (ShrinkMask.any()) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 251682a..a43020e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -15263,23 +15263,31 @@ SDValue DAGCombiner::visitAssertExt(SDNode *N) {
}
}
- // If we have (AssertZext (and (AssertSext X, iX), M), iY) and Y is smaller
- // than X, and the And doesn't change the lower iX bits, we can move the
- // AssertZext in front of the And and drop the AssertSext.
if (Opcode == ISD::AssertZext && N0.getOpcode() == ISD::AND &&
- N0.hasOneUse() && N0.getOperand(0).getOpcode() == ISD::AssertSext &&
isa<ConstantSDNode>(N0.getOperand(1))) {
- SDValue BigA = N0.getOperand(0);
- EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
const APInt &Mask = N0.getConstantOperandAPInt(1);
- if (AssertVT.bitsLT(BigA_AssertVT) &&
- Mask.countr_one() >= BigA_AssertVT.getScalarSizeInBits()) {
- SDLoc DL(N);
- SDValue NewAssert =
- DAG.getNode(Opcode, DL, N->getValueType(0), BigA.getOperand(0), N1);
- return DAG.getNode(ISD::AND, DL, N->getValueType(0), NewAssert,
- N0.getOperand(1));
+
+ // If we have (AssertZext (and (AssertSext X, iX), M), iY) and Y is smaller
+ // than X, and the And doesn't change the lower iX bits, we can move the
+ // AssertZext in front of the And and drop the AssertSext.
+ if (N0.getOperand(0).getOpcode() == ISD::AssertSext && N0.hasOneUse()) {
+ SDValue BigA = N0.getOperand(0);
+ EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
+ if (AssertVT.bitsLT(BigA_AssertVT) &&
+ Mask.countr_one() >= BigA_AssertVT.getScalarSizeInBits()) {
+ SDLoc DL(N);
+ SDValue NewAssert =
+ DAG.getNode(Opcode, DL, N->getValueType(0), BigA.getOperand(0), N1);
+ return DAG.getNode(ISD::AND, DL, N->getValueType(0), NewAssert,
+ N0.getOperand(1));
+ }
}
+
+ // Remove AssertZext entirely if the mask guarantees the assertion cannot
+ // fail.
+ // TODO: Use KB countMinLeadingZeros to handle non-constant masks?
+ if (Mask.isIntN(AssertVT.getScalarSizeInBits()))
+ return N0;
}
return SDValue();
@@ -28974,13 +28982,100 @@ SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
return SDValue();
}
+static SDValue matchMergedBFX(SDValue Root, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ // Match a pattern such as:
+ // (X | (X >> C0) | (X >> C1) | ...) & Mask
+ // This extracts contiguous parts of X and ORs them together before comparing.
+ // We can optimize this so that we directly check (X & SomeMask) instead,
+ // eliminating the shifts.
+
+ EVT VT = Root.getValueType();
+
+ // TODO: Support vectors?
+ if (!VT.isScalarInteger() || Root.getOpcode() != ISD::AND)
+ return SDValue();
+
+ SDValue N0 = Root.getOperand(0);
+ SDValue N1 = Root.getOperand(1);
+
+ if (N0.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N1))
+ return SDValue();
+
+ APInt RootMask = cast<ConstantSDNode>(N1)->getAsAPIntVal();
+
+ SDValue Src;
+ const auto IsSrc = [&](SDValue V) {
+ if (!Src) {
+ Src = V;
+ return true;
+ }
+
+ return Src == V;
+ };
+
+ SmallVector<SDValue> Worklist = {N0};
+ APInt PartsMask(VT.getSizeInBits(), 0);
+ while (!Worklist.empty()) {
+ SDValue V = Worklist.pop_back_val();
+ if (!V.hasOneUse() && (Src && Src != V))
+ return SDValue();
+
+ if (V.getOpcode() == ISD::OR) {
+ Worklist.push_back(V.getOperand(0));
+ Worklist.push_back(V.getOperand(1));
+ continue;
+ }
+
+ if (V.getOpcode() == ISD::SRL) {
+ SDValue ShiftSrc = V.getOperand(0);
+ SDValue ShiftAmt = V.getOperand(1);
+
+ if (!IsSrc(ShiftSrc) || !isa<ConstantSDNode>(ShiftAmt))
+ return SDValue();
+
+ auto ShiftAmtVal = cast<ConstantSDNode>(ShiftAmt)->getAsZExtVal();
+ if (ShiftAmtVal > RootMask.getBitWidth())
+ return SDValue();
+
+ PartsMask |= (RootMask << ShiftAmtVal);
+ continue;
+ }
+
+ if (IsSrc(V)) {
+ PartsMask |= RootMask;
+ continue;
+ }
+
+ return SDValue();
+ }
+
+ if (!Src)
+ return SDValue();
+
+ SDLoc DL(Root);
+ return DAG.getNode(ISD::AND, DL, VT,
+ {Src, DAG.getConstant(PartsMask, DL, VT)});
+}
+
/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, const SDLoc &DL,
bool foldBooleans) {
TargetLowering::DAGCombinerInfo
DagCombineInfo(DAG, Level, false, this);
- return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
+ if (SDValue C =
+ TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL))
+ return C;
+
+ if (ISD::isIntEqualitySetCC(Cond) && N0.getOpcode() == ISD::AND &&
+ isNullConstant(N1)) {
+
+ if (SDValue Res = matchMergedBFX(N0, DAG, TLI))
+ return DAG.getSetCC(DL, VT, Res, N1, Cond);
+ }
+
+ return SDValue();
}
/// Given an ISD::SDIV node expressing a divide by constant, return
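The equivalence that matchMergedBFX exploits for equality comparisons against zero can be sanity-checked in isolation: OR-ing shifted copies of X and masking is equivalent to masking X with the union of the shifted masks, which removes the shifts. A standalone C++ check, with constants chosen arbitrarily for illustration (not part of the patch):

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const unsigned C0 = 8, C1 = 16;
  const uint32_t Mask = 0xFF;
  for (uint32_t X : {0u, 0x00001200u, 0x00AB0000u, 0x000000CDu,
                     0xAB000000u, 0xDEADBEEFu}) {
    bool Orig = ((X | (X >> C0) | (X >> C1)) & Mask) == 0;
    uint32_t PartsMask = Mask | (Mask << C0) | (Mask << C1);
    bool Folded = (X & PartsMask) == 0;
    assert(Orig == Folded); // both forms agree on the comparison against zero
  }
  return 0;
}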
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 6a2e782..31e7855 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -888,7 +888,8 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
}
if (MI->isCandidateForAdditionalCallInfo()) {
- if (DAG->getTarget().Options.EmitCallSiteInfo)
+ if (DAG->getTarget().Options.EmitCallSiteInfo ||
+ DAG->getTarget().Options.EmitCallGraphSection)
MF.addCallSiteInfo(MI, DAG->getCallSiteInfo(Node));
if (auto CalledGlobal = DAG->getCalledGlobal(Node))
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 5c586f7..f41b6eb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7843,20 +7843,43 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
}
- // Perform trivial constant folding.
- if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}, Flags))
- return SV;
+ if (N1.getOpcode() == ISD::POISON || N2.getOpcode() == ISD::POISON) {
+ switch (Opcode) {
+ case ISD::XOR:
+ case ISD::ADD:
+ case ISD::PTRADD:
+ case ISD::SUB:
+ case ISD::SIGN_EXTEND_INREG:
+ case ISD::UDIV:
+ case ISD::SDIV:
+ case ISD::UREM:
+ case ISD::SREM:
+ case ISD::MUL:
+ case ISD::AND:
+ case ISD::SSUBSAT:
+ case ISD::USUBSAT:
+ case ISD::UMIN:
+ case ISD::OR:
+ case ISD::SADDSAT:
+ case ISD::UADDSAT:
+ case ISD::UMAX:
+ case ISD::SMAX:
+ case ISD::SMIN:
+ // fold op(arg1, poison) -> poison, fold op(poison, arg2) -> poison.
+ return N2.getOpcode() == ISD::POISON ? N2 : N1;
+ }
+ }
// Canonicalize an UNDEF to the RHS, even over a constant.
- if (N1.isUndef()) {
+ if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() != ISD::UNDEF) {
if (TLI->isCommutativeBinOp(Opcode)) {
std::swap(N1, N2);
} else {
switch (Opcode) {
case ISD::PTRADD:
case ISD::SUB:
- // fold op(undef, arg2) -> undef, fold op(poison, arg2) ->poison.
- return N1.getOpcode() == ISD::POISON ? getPOISON(VT) : getUNDEF(VT);
+ // fold op(undef, non_undef_arg2) -> undef.
+ return N1;
case ISD::SIGN_EXTEND_INREG:
case ISD::UDIV:
case ISD::SDIV:
@@ -7864,18 +7887,17 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::SREM:
case ISD::SSUBSAT:
case ISD::USUBSAT:
- // fold op(undef, arg2) -> 0, fold op(poison, arg2) -> poison.
- return N1.getOpcode() == ISD::POISON ? getPOISON(VT)
- : getConstant(0, DL, VT);
+ // fold op(undef, non_undef_arg2) -> 0.
+ return getConstant(0, DL, VT);
}
}
}
// Fold a bunch of operators when the RHS is undef.
- if (N2.isUndef()) {
+ if (N2.getOpcode() == ISD::UNDEF) {
switch (Opcode) {
case ISD::XOR:
- if (N1.isUndef())
+ if (N1.getOpcode() == ISD::UNDEF)
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
@@ -7883,29 +7905,48 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::ADD:
case ISD::PTRADD:
case ISD::SUB:
+ // fold op(arg1, undef) -> undef.
+ return N2;
case ISD::UDIV:
case ISD::SDIV:
case ISD::UREM:
case ISD::SREM:
- // fold op(arg1, undef) -> undef, fold op(arg1, poison) -> poison.
- return N2.getOpcode() == ISD::POISON ? getPOISON(VT) : getUNDEF(VT);
+ // fold op(arg1, undef) -> poison.
+ return getPOISON(VT);
case ISD::MUL:
case ISD::AND:
case ISD::SSUBSAT:
case ISD::USUBSAT:
- // fold op(arg1, undef) -> 0, fold op(arg1, poison) -> poison.
- return N2.getOpcode() == ISD::POISON ? getPOISON(VT)
- : getConstant(0, DL, VT);
+ case ISD::UMIN:
+ // fold op(undef, undef) -> undef, fold op(arg1, undef) -> 0.
+ return N1.getOpcode() == ISD::UNDEF ? N2 : getConstant(0, DL, VT);
case ISD::OR:
case ISD::SADDSAT:
case ISD::UADDSAT:
- // fold op(arg1, undef) -> an all-ones constant, fold op(arg1, poison) ->
- // poison.
- return N2.getOpcode() == ISD::POISON ? getPOISON(VT)
- : getAllOnesConstant(DL, VT);
+ case ISD::UMAX:
+ // fold op(undef, undef) -> undef, fold op(arg1, undef) -> -1.
+ return N1.getOpcode() == ISD::UNDEF ? N2 : getAllOnesConstant(DL, VT);
+ case ISD::SMAX:
+ // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MAX_INT.
+ return N1.getOpcode() == ISD::UNDEF
+ ? N2
+ : getConstant(
+ APInt::getSignedMaxValue(VT.getScalarSizeInBits()), DL,
+ VT);
+ case ISD::SMIN:
+ // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MIN_INT.
+ return N1.getOpcode() == ISD::UNDEF
+ ? N2
+ : getConstant(
+ APInt::getSignedMinValue(VT.getScalarSizeInBits()), DL,
+ VT);
}
}
+ // Perform trivial constant folding.
+ if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}, Flags))
+ return SV;
+
// Memoize this node if possible.
SDNode *N;
SDVTList VTs = getVTList(VT);
@@ -12741,7 +12782,7 @@ bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
return Seen;
}
-/// isOperand - Return true if this node is an operand of N.
+/// Return true if the referenced return value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
return is_contained(N->op_values(), *this);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 1764910..48d6b99 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9471,7 +9471,7 @@ SDValue TargetLowering::CTTZTableLookup(SDNode *Node, SelectionDAG &DAG,
ISD::SRL, DL, VT,
DAG.getNode(ISD::MUL, DL, VT, DAG.getNode(ISD::AND, DL, VT, Op, Neg),
DAG.getConstant(DeBruijn, DL, VT)),
- DAG.getConstant(ShiftAmt, DL, VT));
+ DAG.getShiftAmountConstant(ShiftAmt, VT, DL));
Lookup = DAG.getSExtOrTrunc(Lookup, DL, getPointerTy(TD));
SmallVector<uint8_t> Table(BitWidth, 0);
diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp
index a88c57f..5d720fb 100644
--- a/llvm/lib/CodeGen/TailDuplicator.cpp
+++ b/llvm/lib/CodeGen/TailDuplicator.cpp
@@ -604,12 +604,21 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
bool HasComputedGoto = false;
if (!TailBB.empty()) {
HasIndirectbr = TailBB.back().isIndirectBranch();
- HasComputedGoto = TailBB.terminatorIsComputedGoto();
+ HasComputedGoto = TailBB.terminatorIsComputedGotoWithSuccessors();
}
if (HasIndirectbr && PreRegAlloc)
MaxDuplicateCount = TailDupIndirectBranchSize;
+  // Allow higher limits when the block has computed gotos and we are running
+  // after register allocation. NB. This basically unfactors computed gotos
+  // that were factored early on in the compilation process to speed up
+  // edge-based data flow. If we do not unfactor them again, it can seriously
+  // pessimize code with many computed jumps in the source code, such as
+  // interpreters. Therefore we do not restrict the computed gotos.
+ if (HasComputedGoto && !PreRegAlloc)
+ MaxDuplicateCount = std::max(MaxDuplicateCount, 10u);
+
// Check the instructions in the block to determine whether tail-duplication
// is invalid or unlikely to be profitable.
unsigned InstrCount = 0;
@@ -663,12 +672,7 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
  // Duplicating a BB which has both multiple predecessors and successors may
  // cause a huge number of PHI nodes. If we want to remove this limitation,
// we have to address https://github.com/llvm/llvm-project/issues/78578.
- // NB. This basically unfactors computed gotos that were factored early on in
- // the compilation process to speed up edge based data flow. If we do not
- // unfactor them again, it can seriously pessimize code with many computed
- // jumps in the source code, such as interpreters. Therefore we do not
- // restrict the computed gotos.
- if (!HasComputedGoto && TailBB.pred_size() > TailDupPredSize &&
+ if (PreRegAlloc && TailBB.pred_size() > TailDupPredSize &&
TailBB.succ_size() > TailDupSuccSize) {
// If TailBB or any of its successors contains a phi, we may have to add a
// large number of additional phis with additional incoming values.
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 18d6bbc..705e046e 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1406,7 +1406,7 @@ void TargetInstrInfo::reassociateOps(
const MCInstrDesc &MCID, Register DestReg) {
return MachineInstrBuilder(
MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
- .setPCSections(MIMD.getPCSections())
+ .copyMIMetadata(MIMD)
.addReg(DestReg, RegState::Define);
};
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 68b8a00..3c91b0e 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2062,7 +2062,7 @@ void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
// FreeBSD has "__stack_chk_guard" defined externally on libc.so
if (M.getDirectAccessExternalData() &&
- !TM.getTargetTriple().isWindowsGNUEnvironment() &&
+ !TM.getTargetTriple().isOSCygMing() &&
!(TM.getTargetTriple().isPPC64() &&
TM.getTargetTriple().isOSFreeBSD()) &&
(!TM.getTargetTriple().isOSDarwin() ||
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 725e951..e9172f4 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -1060,27 +1060,27 @@ MCSection *TargetLoweringObjectFileELF::getSectionForConstant(
auto &Context = getContext();
if (Kind.isMergeableConst4() && MergeableConst4Section)
- return Context.getELFSection(".rodata.cst4." + SectionSuffix,
+ return Context.getELFSection(".rodata.cst4." + SectionSuffix + ".",
ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_MERGE, 4);
if (Kind.isMergeableConst8() && MergeableConst8Section)
- return Context.getELFSection(".rodata.cst8." + SectionSuffix,
+ return Context.getELFSection(".rodata.cst8." + SectionSuffix + ".",
ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_MERGE, 8);
if (Kind.isMergeableConst16() && MergeableConst16Section)
- return Context.getELFSection(".rodata.cst16." + SectionSuffix,
+ return Context.getELFSection(".rodata.cst16." + SectionSuffix + ".",
ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_MERGE, 16);
if (Kind.isMergeableConst32() && MergeableConst32Section)
- return Context.getELFSection(".rodata.cst32." + SectionSuffix,
+ return Context.getELFSection(".rodata.cst32." + SectionSuffix + ".",
ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_MERGE, 32);
if (Kind.isReadOnly())
- return Context.getELFSection(".rodata." + SectionSuffix, ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC);
+ return Context.getELFSection(".rodata." + SectionSuffix + ".",
+ ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
assert(Kind.isReadOnlyWithRel() && "Unknown section kind");
- return Context.getELFSection(".data.rel.ro." + SectionSuffix,
+ return Context.getELFSection(".data.rel.ro." + SectionSuffix + ".",
ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_WRITE);
}
diff --git a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
index 222dc88..6ddb12b 100644
--- a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
+++ b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
@@ -413,6 +413,117 @@ static bool isTlsAddressCode(uint8_t DW_OP_Code) {
DW_OP_Code == dwarf::DW_OP_GNU_push_tls_address;
}
+static void constructSeqOffsettoOrigRowMapping(
+ CompileUnit &Unit, const DWARFDebugLine::LineTable &LT,
+ DenseMap<uint64_t, unsigned> &SeqOffToOrigRow) {
+
+ // Use std::map for ordered iteration.
+ std::map<uint64_t, unsigned> LineTableMapping;
+
+ // First, trust the sequences that the DWARF parser did identify.
+ for (const DWARFDebugLine::Sequence &Seq : LT.Sequences)
+ LineTableMapping[Seq.StmtSeqOffset] = Seq.FirstRowIndex;
+
+ // Second, manually find sequence boundaries and match them to the
+ // sorted attributes to handle sequences the parser might have missed.
+ auto StmtAttrs = Unit.getStmtSeqListAttributes();
+ llvm::sort(StmtAttrs, [](const PatchLocation &A, const PatchLocation &B) {
+ return A.get() < B.get();
+ });
+
+ std::vector<unsigned> SeqStartRows;
+ SeqStartRows.push_back(0);
+ for (auto [I, Row] : llvm::enumerate(ArrayRef(LT.Rows).drop_back()))
+ if (Row.EndSequence)
+ SeqStartRows.push_back(I + 1);
+
+  // While the LineTableMapping parsed from the line table serves as the
+  // ground truth, e.g.
+ //
+ // SeqOff Row
+ // 0x08 9
+ // 0x14 15
+ //
+ // The StmtAttrs and SeqStartRows may not match perfectly, e.g.
+ //
+ // StmtAttrs SeqStartRows
+ // 0x04 3
+ // 0x08 5
+ // 0x10 9
+ // 0x12 11
+ // 0x14 15
+ //
+ // In this case, we don't want to assign 5 to 0x08, since we know 0x08
+  // maps to 9. If we did a dummy 1:1 mapping, 0x10 would be mapped to 9,
+  // which is incorrect. The expected behavior is to ignore 5 and realign the
+  // table based on the result from the line table:
+ //
+ // StmtAttrs SeqStartRows
+ // 0x04 3
+ // -- 5
+ // 0x08 9 <- LineTableMapping ground truth
+ // 0x10 11
+ // 0x12 --
+ // 0x14 15 <- LineTableMapping ground truth
+
+ ArrayRef StmtAttrsRef(StmtAttrs);
+ ArrayRef SeqStartRowsRef(SeqStartRows);
+
+ // Dummy last element to make sure StmtAttrsRef and SeqStartRowsRef always
+ // run out first.
+ constexpr uint64_t DummyKey = UINT64_MAX;
+ constexpr unsigned DummyVal = UINT32_MAX;
+ LineTableMapping[DummyKey] = DummyVal;
+
+ for (auto [NextSeqOff, NextRow] : LineTableMapping) {
+    // Explicit capture to avoid capturing structured bindings and make C++17
+    // happy.
+ auto StmtAttrSmallerThanNext = [N = NextSeqOff](const PatchLocation &SA) {
+ return SA.get() < N;
+ };
+ auto SeqStartSmallerThanNext = [N = NextRow](const unsigned &Row) {
+ return Row < N;
+ };
+    // If both StmtAttrs and SeqStartRows point to values not yet in the
+    // LineTableMapping, we do a dummy one-to-one mapping and move both
+    // pointers.
+ while (!StmtAttrsRef.empty() && !SeqStartRowsRef.empty() &&
+ StmtAttrSmallerThanNext(StmtAttrsRef.front()) &&
+ SeqStartSmallerThanNext(SeqStartRowsRef.front())) {
+ SeqOffToOrigRow[StmtAttrsRef.consume_front().get()] =
+ SeqStartRowsRef.consume_front();
+ }
+    // One of the pointers points to a value at or past Next in the
+    // LineTableMapping. We move the pointers to re-align with the
+    // LineTableMapping.
+ StmtAttrsRef = StmtAttrsRef.drop_while(StmtAttrSmallerThanNext);
+ SeqStartRowsRef = SeqStartRowsRef.drop_while(SeqStartSmallerThanNext);
+ // Use the LineTableMapping's result as the ground truth and move
+ // on.
+ if (NextSeqOff != DummyKey) {
+ SeqOffToOrigRow[NextSeqOff] = NextRow;
+ }
+    // Move the pointers if they point at Next.
+ // It is possible that they point to later entries in LineTableMapping.
+ // Therefore we only increment the pointers after we validate they are
+ // pointing to the `Next` entry. e.g.
+ //
+ // LineTableMapping
+ // SeqOff Row
+ // 0x08 9 <- NextSeqOff/NextRow
+ // 0x14 15
+ //
+ // StmtAttrs SeqStartRows
+ // 0x14 13 <- StmtAttrsRef.front() / SeqStartRowsRef.front()
+ // 0x16 15
+ // -- 17
+ if (!StmtAttrsRef.empty() && StmtAttrsRef.front().get() == NextSeqOff)
+ StmtAttrsRef.consume_front();
+ if (!SeqStartRowsRef.empty() && SeqStartRowsRef.front() == NextRow)
+ SeqStartRowsRef.consume_front();
+ }
+}
+
std::pair<bool, std::optional<int64_t>>
DWARFLinker::getVariableRelocAdjustment(AddressesMap &RelocMgr,
const DWARFDie &DIE) {
@@ -2297,8 +2408,12 @@ void DWARFLinker::DIECloner::generateLineTableForUnit(CompileUnit &Unit) {
// Create a map of stmt sequence offsets to original row indices.
DenseMap<uint64_t, unsigned> SeqOffToOrigRow;
- for (const DWARFDebugLine::Sequence &Seq : LT->Sequences)
- SeqOffToOrigRow[Seq.StmtSeqOffset] = Seq.FirstRowIndex;
+ // The DWARF parser's discovery of sequences can be incomplete. To
+ // ensure all DW_AT_LLVM_stmt_sequence attributes can be patched, we
+ // build a map from both the parser's results and a manual
+ // reconstruction.
+ if (!LT->Rows.empty())
+ constructSeqOffsettoOrigRowMapping(Unit, *LT, SeqOffToOrigRow);
// Create a map of original row indices to new row indices.
DenseMap<size_t, size_t> OrigRowToNewRow;
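The realignment loop in constructSeqOffsettoOrigRowMapping can be read as a merge of two sorted lists against a set of ground-truth anchors. A simplified standalone C++ model of that behavior (plain integer types and container names are stand-ins, not the patch's API; a sketch only):

#include <cstdint>
#include <deque>
#include <map>

std::map<uint64_t, unsigned>
realign(std::map<uint64_t, unsigned> GroundTruth, // parser-identified sequences
        std::deque<uint64_t> StmtAttrs,           // sorted stmt_sequence offsets
        std::deque<unsigned> SeqStartRows) {      // sorted manual sequence starts
  std::map<uint64_t, unsigned> Result;
  GroundTruth[UINT64_MAX] = UINT32_MAX; // sentinel so both lists drain first
  for (auto [NextOff, NextRow] : GroundTruth) {
    // Pair entries 1:1 while both fronts are still before the next
    // ground-truth entry.
    while (!StmtAttrs.empty() && !SeqStartRows.empty() &&
           StmtAttrs.front() < NextOff && SeqStartRows.front() < NextRow) {
      Result[StmtAttrs.front()] = SeqStartRows.front();
      StmtAttrs.pop_front();
      SeqStartRows.pop_front();
    }
    // Drop unmatched leftovers that fall before the ground-truth entry.
    while (!StmtAttrs.empty() && StmtAttrs.front() < NextOff)
      StmtAttrs.pop_front();
    while (!SeqStartRows.empty() && SeqStartRows.front() < NextRow)
      SeqStartRows.pop_front();
    if (NextOff != UINT64_MAX)
      Result[NextOff] = NextRow; // the ground truth wins
    // Consume the fronts only if they actually point at this entry.
    if (!StmtAttrs.empty() && StmtAttrs.front() == NextOff)
      StmtAttrs.pop_front();
    if (!SeqStartRows.empty() && SeqStartRows.front() == NextRow)
      SeqStartRows.pop_front();
  }
  return Result;
}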
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
index bd0d72f..0e95369 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -157,8 +157,7 @@ private:
processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
const MachOObjectFile &BaseObj,
ObjSectionToIDMap &ObjSectionToID) {
- const MachOObjectFile &Obj =
- static_cast<const MachOObjectFile&>(BaseObj);
+ const MachOObjectFile &Obj = BaseObj;
MachO::any_relocation_info RE =
Obj.getRelocation(RelI->getRawDataRefImpl());
diff --git a/llvm/lib/Frontend/HLSL/CMakeLists.txt b/llvm/lib/Frontend/HLSL/CMakeLists.txt
index 5343469..3d22577 100644
--- a/llvm/lib/Frontend/HLSL/CMakeLists.txt
+++ b/llvm/lib/Frontend/HLSL/CMakeLists.txt
@@ -1,5 +1,6 @@
add_llvm_component_library(LLVMFrontendHLSL
CBuffer.cpp
+ HLSLBinding.cpp
HLSLResource.cpp
HLSLRootSignature.cpp
RootSignatureMetadata.cpp
diff --git a/llvm/lib/Frontend/HLSL/HLSLBinding.cpp b/llvm/lib/Frontend/HLSL/HLSLBinding.cpp
new file mode 100644
index 0000000..d581311
--- /dev/null
+++ b/llvm/lib/Frontend/HLSL/HLSLBinding.cpp
@@ -0,0 +1,142 @@
+//===- HLSLBinding.cpp - Representation for resource bindings in HLSL -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+using namespace hlsl;
+
+std::optional<uint32_t>
+BindingInfo::findAvailableBinding(dxil::ResourceClass RC, uint32_t Space,
+ int32_t Size) {
+ BindingSpaces &BS = getBindingSpaces(RC);
+ RegisterSpace &RS = BS.getOrInsertSpace(Space);
+ return RS.findAvailableBinding(Size);
+}
+
+BindingInfo::RegisterSpace &
+BindingInfo::BindingSpaces::getOrInsertSpace(uint32_t Space) {
+ for (auto It = Spaces.begin(), End = Spaces.end(); It != End; ++It) {
+ if (It->Space == Space)
+ return *It;
+ if (It->Space < Space)
+ continue;
+ return *Spaces.insert(It, Space);
+ }
+ return Spaces.emplace_back(Space);
+}
+
+std::optional<uint32_t>
+BindingInfo::RegisterSpace::findAvailableBinding(int32_t Size) {
+ assert((Size == -1 || Size > 0) && "invalid size");
+
+ if (FreeRanges.empty())
+ return std::nullopt;
+
+ // unbounded array
+ if (Size == -1) {
+ BindingRange &Last = FreeRanges.back();
+ if (Last.UpperBound != ~0u)
+ // this space is already occupied by an unbounded array
+ return std::nullopt;
+ uint32_t RegSlot = Last.LowerBound;
+ FreeRanges.pop_back();
+ return RegSlot;
+ }
+
+ // single resource or fixed-size array
+ for (BindingRange &R : FreeRanges) {
+ // compare the size as uint64_t to prevent overflow for range (0, ~0u)
+ if ((uint64_t)R.UpperBound - R.LowerBound + 1 < (uint64_t)Size)
+ continue;
+ uint32_t RegSlot = R.LowerBound;
+    // This might create a range where (LowerBound == UpperBound + 1). When
+    // that happens, the next time this function is called the range will be
+    // skipped over by the check above (at this point Size is always > 0).
+ R.LowerBound += Size;
+ return RegSlot;
+ }
+
+ return std::nullopt;
+}
+
+BindingInfo BindingInfoBuilder::calculateBindingInfo(
+ llvm::function_ref<void(const BindingInfoBuilder &Builder,
+ const Binding &Overlapping)>
+ ReportOverlap) {
+ // sort all the collected bindings
+ llvm::stable_sort(Bindings);
+
+ // remove duplicates
+ Binding *NewEnd = llvm::unique(Bindings);
+ if (NewEnd != Bindings.end())
+ Bindings.erase(NewEnd);
+
+ BindingInfo Info;
+
+ // Go over the sorted bindings and build up lists of free register ranges
+ // for each binding type and used spaces. Bindings are sorted by resource
+ // class, space, and lower bound register slot.
+ BindingInfo::BindingSpaces *BS =
+ &Info.getBindingSpaces(dxil::ResourceClass::SRV);
+ for (const Binding &B : Bindings) {
+ if (BS->RC != B.RC)
+      // move to the next resource class's spaces
+ BS = &Info.getBindingSpaces(B.RC);
+
+ BindingInfo::RegisterSpace *S = BS->Spaces.empty()
+ ? &BS->Spaces.emplace_back(B.Space)
+ : &BS->Spaces.back();
+ assert(S->Space <= B.Space && "bindings not sorted correctly?");
+ if (B.Space != S->Space)
+ // add new space
+ S = &BS->Spaces.emplace_back(B.Space);
+
+    // The space is full - there are no free slots left, or the rest of the
+    // slots are taken by an unbounded array. Report the overlapping binding
+    // to the caller.
+ if (S->FreeRanges.empty() || S->FreeRanges.back().UpperBound < ~0u) {
+ ReportOverlap(*this, B);
+ continue;
+ }
+ // adjust the last free range lower bound, split it in two, or remove it
+ BindingInfo::BindingRange &LastFreeRange = S->FreeRanges.back();
+ if (LastFreeRange.LowerBound == B.LowerBound) {
+ if (B.UpperBound < ~0u)
+ LastFreeRange.LowerBound = B.UpperBound + 1;
+ else
+ S->FreeRanges.pop_back();
+ } else if (LastFreeRange.LowerBound < B.LowerBound) {
+ LastFreeRange.UpperBound = B.LowerBound - 1;
+ if (B.UpperBound < ~0u)
+ S->FreeRanges.emplace_back(B.UpperBound + 1, ~0u);
+ } else {
+ // We don't have room here. Report the overlapping binding to the caller
+ // and mark any extra space this binding would use as unavailable.
+ ReportOverlap(*this, B);
+ if (B.UpperBound < ~0u)
+ LastFreeRange.LowerBound =
+ std::max(LastFreeRange.LowerBound, B.UpperBound + 1);
+ else
+ S->FreeRanges.pop_back();
+ }
+ }
+
+ return Info;
+}
+
+const BindingInfoBuilder::Binding &BindingInfoBuilder::findOverlapping(
+ const BindingInfoBuilder::Binding &ReportedBinding) const {
+ for (const BindingInfoBuilder::Binding &Other : Bindings)
+ if (ReportedBinding.LowerBound <= Other.UpperBound &&
+ Other.LowerBound <= ReportedBinding.UpperBound)
+ return Other;
+
+ llvm_unreachable("Searching for overlap for binding that does not overlap");
+}
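A rough usage sketch for the new helpers, assuming a BindingInfoBuilder that has already been populated with a module's resource bindings (how bindings are registered is not shown in this file); only calculateBindingInfo, findOverlapping, and findAvailableBinding are taken from the code above:

#include "llvm/Frontend/HLSL/HLSLBinding.h"
#include <optional>

using namespace llvm;
using namespace llvm::hlsl;

// Hypothetical driver: report overlaps, then ask for a free SRV slot.
static void assignRegisters(BindingInfoBuilder &Builder) {
  BindingInfo Info = Builder.calculateBindingInfo(
      [](const BindingInfoBuilder &B,
         const BindingInfoBuilder::Binding &Overlapping) {
        // findOverlapping names one binding that collides with the reported
        // one; a real caller would turn the pair into a diagnostic.
        const BindingInfoBuilder::Binding &Other = B.findOverlapping(Overlapping);
        (void)Other;
      });

  // Request one SRV register in space 0; Size == -1 would instead claim the
  // tail of the space for an unbounded array.
  if (std::optional<uint32_t> Slot = Info.findAvailableBinding(
          dxil::ResourceClass::SRV, /*Space=*/0, /*Size=*/1))
    (void)*Slot; // bind the resource at register *Slot
}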
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
index 53f5934..48ff1ca 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
@@ -13,15 +13,21 @@
#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
#include "llvm/Frontend/HLSL/RootSignatureValidations.h"
-#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/ScopedPrinter.h"
+using namespace llvm;
+
namespace llvm {
namespace hlsl {
namespace rootsig {
+char GenericRSMetadataError::ID;
+char InvalidRSMetadataFormat::ID;
+char InvalidRSMetadataValue::ID;
+template <typename T> char RootSignatureValidationError<T>::ID;
+
static std::optional<uint32_t> extractMdIntValue(MDNode *Node,
unsigned int OpId) {
if (auto *CI =
@@ -45,19 +51,6 @@ static std::optional<StringRef> extractMdStringValue(MDNode *Node,
return NodeText->getString();
}
-static bool reportError(LLVMContext *Ctx, Twine Message,
- DiagnosticSeverity Severity = DS_Error) {
- Ctx->diagnose(DiagnosticInfoGeneric(Message, Severity));
- return true;
-}
-
-static bool reportValueError(LLVMContext *Ctx, Twine ParamName,
- uint32_t Value) {
- Ctx->diagnose(DiagnosticInfoGeneric(
- "Invalid value for " + ParamName + ": " + Twine(Value), DS_Error));
- return true;
-}
-
static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = {
{"CBV", dxil::ResourceClass::CBuffer},
{"SRV", dxil::ResourceClass::SRV},
@@ -120,7 +113,7 @@ MDNode *MetadataBuilder::BuildRootFlags(const dxbc::RootFlags &Flags) {
IRBuilder<> Builder(Ctx);
Metadata *Operands[] = {
MDString::get(Ctx, "RootFlags"),
- ConstantAsMetadata::get(Builder.getInt32(llvm::to_underlying(Flags))),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -130,7 +123,7 @@ MDNode *MetadataBuilder::BuildRootConstants(const RootConstants &Constants) {
Metadata *Operands[] = {
MDString::get(Ctx, "RootConstants"),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Constants.Visibility))),
+ Builder.getInt32(to_underlying(Constants.Visibility))),
ConstantAsMetadata::get(Builder.getInt32(Constants.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Constants.Space)),
ConstantAsMetadata::get(Builder.getInt32(Constants.Num32BitConstants)),
@@ -140,18 +133,18 @@ MDNode *MetadataBuilder::BuildRootConstants(const RootConstants &Constants) {
MDNode *MetadataBuilder::BuildRootDescriptor(const RootDescriptor &Descriptor) {
IRBuilder<> Builder(Ctx);
- std::optional<StringRef> ResName = getResourceName(
- dxil::ResourceClass(llvm::to_underlying(Descriptor.Type)));
+ std::optional<StringRef> ResName =
+ getResourceName(dxil::ResourceClass(to_underlying(Descriptor.Type)));
assert(ResName && "Provided an invalid Resource Class");
- llvm::SmallString<7> Name({"Root", *ResName});
+ SmallString<7> Name({"Root", *ResName});
Metadata *Operands[] = {
MDString::get(Ctx, Name),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Descriptor.Visibility))),
+ Builder.getInt32(to_underlying(Descriptor.Visibility))),
ConstantAsMetadata::get(Builder.getInt32(Descriptor.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Descriptor.Space)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Descriptor.Flags))),
+ Builder.getInt32(to_underlying(Descriptor.Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -162,7 +155,7 @@ MDNode *MetadataBuilder::BuildDescriptorTable(const DescriptorTable &Table) {
// Set the mandatory arguments
TableOperands.push_back(MDString::get(Ctx, "DescriptorTable"));
TableOperands.push_back(ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Table.Visibility))));
+ Builder.getInt32(to_underlying(Table.Visibility))));
// Remaining operands are references to the table's clauses. The in-memory
// representation of the Root Elements created from parsing will ensure that
@@ -182,7 +175,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause(
const DescriptorTableClause &Clause) {
IRBuilder<> Builder(Ctx);
std::optional<StringRef> ResName =
- getResourceName(dxil::ResourceClass(llvm::to_underlying(Clause.Type)));
+ getResourceName(dxil::ResourceClass(to_underlying(Clause.Type)));
assert(ResName && "Provided an invalid Resource Class");
Metadata *Operands[] = {
MDString::get(Ctx, *ResName),
@@ -190,8 +183,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause(
ConstantAsMetadata::get(Builder.getInt32(Clause.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Clause.Space)),
ConstantAsMetadata::get(Builder.getInt32(Clause.Offset)),
- ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Clause.Flags))),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Clause.Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -200,108 +192,102 @@ MDNode *MetadataBuilder::BuildStaticSampler(const StaticSampler &Sampler) {
IRBuilder<> Builder(Ctx);
Metadata *Operands[] = {
MDString::get(Ctx, "StaticSampler"),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Sampler.Filter))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.Filter))),
+ Builder.getInt32(to_underlying(Sampler.AddressU))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressU))),
+ Builder.getInt32(to_underlying(Sampler.AddressV))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressV))),
+ Builder.getInt32(to_underlying(Sampler.AddressW))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressW))),
- ConstantAsMetadata::get(llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx),
- Sampler.MipLODBias)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MipLODBias)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.MaxAnisotropy)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.CompFunc))),
+ Builder.getInt32(to_underlying(Sampler.CompFunc))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.BorderColor))),
+ Builder.getInt32(to_underlying(Sampler.BorderColor))),
ConstantAsMetadata::get(
- llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx), Sampler.MinLOD)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MinLOD)),
ConstantAsMetadata::get(
- llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx), Sampler.MaxLOD)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MaxLOD)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.Space)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.Visibility))),
+ Builder.getInt32(to_underlying(Sampler.Visibility))),
};
return MDNode::get(Ctx, Operands);
}
-bool MetadataParser::parseRootFlags(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootFlagNode) {
-
+Error MetadataParser::parseRootFlags(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootFlagNode) {
if (RootFlagNode->getNumOperands() != 2)
- return reportError(Ctx, "Invalid format for RootFlag Element");
+ return make_error<InvalidRSMetadataFormat>("RootFlag Element");
if (std::optional<uint32_t> Val = extractMdIntValue(RootFlagNode, 1))
RSD.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for RootFlag");
+ return make_error<InvalidRSMetadataValue>("RootFlag");
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootConstants(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootConstantNode) {
-
+Error MetadataParser::parseRootConstants(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootConstantNode) {
if (RootConstantNode->getNumOperands() != 5)
- return reportError(Ctx, "Invalid format for RootConstants Element");
+ return make_error<InvalidRSMetadataFormat>("RootConstants Element");
dxbc::RTS0::v1::RootParameterHeader Header;
// The parameter offset doesn't matter here - we recalculate it during
// serialization Header.ParameterOffset = 0;
- Header.ParameterType =
- llvm::to_underlying(dxbc::RootParameterType::Constants32Bit);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::Constants32Bit);
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
dxbc::RTS0::v1::RootConstants Constants;
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 2))
Constants.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 3))
Constants.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 4))
Constants.Num32BitValues = *Val;
else
- return reportError(Ctx, "Invalid value for Num32BitValues");
+ return make_error<InvalidRSMetadataValue>("Num32BitValues");
RSD.ParametersContainer.addParameter(Header, Constants);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootDescriptors(
- LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootDescriptorNode, RootSignatureElementKind ElementKind) {
+Error MetadataParser::parseRootDescriptors(
+ mcdxbc::RootSignatureDesc &RSD, MDNode *RootDescriptorNode,
+ RootSignatureElementKind ElementKind) {
assert(ElementKind == RootSignatureElementKind::SRV ||
ElementKind == RootSignatureElementKind::UAV ||
ElementKind == RootSignatureElementKind::CBV &&
"parseRootDescriptors should only be called with RootDescriptor "
"element kind.");
if (RootDescriptorNode->getNumOperands() != 5)
- return reportError(Ctx, "Invalid format for Root Descriptor Element");
+ return make_error<InvalidRSMetadataFormat>("Root Descriptor Element");
dxbc::RTS0::v1::RootParameterHeader Header;
switch (ElementKind) {
case RootSignatureElementKind::SRV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::SRV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::SRV);
break;
case RootSignatureElementKind::UAV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::UAV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::UAV);
break;
case RootSignatureElementKind::CBV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::CBV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::CBV);
break;
default:
llvm_unreachable("invalid Root Descriptor kind");
@@ -311,40 +297,38 @@ bool MetadataParser::parseRootDescriptors(
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
dxbc::RTS0::v2::RootDescriptor Descriptor;
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 2))
Descriptor.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 3))
Descriptor.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (RSD.Version == 1) {
RSD.ParametersContainer.addParameter(Header, Descriptor);
- return false;
+ return Error::success();
}
assert(RSD.Version > 1);
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 4))
Descriptor.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for Root Descriptor Flags");
+ return make_error<InvalidRSMetadataValue>("Root Descriptor Flags");
RSD.ParametersContainer.addParameter(Header, Descriptor);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseDescriptorRange(LLVMContext *Ctx,
- mcdxbc::DescriptorTable &Table,
- MDNode *RangeDescriptorNode) {
-
+Error MetadataParser::parseDescriptorRange(mcdxbc::DescriptorTable &Table,
+ MDNode *RangeDescriptorNode) {
if (RangeDescriptorNode->getNumOperands() != 6)
- return reportError(Ctx, "Invalid format for Descriptor Range");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Range");
dxbc::RTS0::v2::DescriptorRange Range;
@@ -352,162 +336,161 @@ bool MetadataParser::parseDescriptorRange(LLVMContext *Ctx,
extractMdStringValue(RangeDescriptorNode, 0);
if (!ElementText.has_value())
- return reportError(Ctx, "Descriptor Range, first element is not a string.");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Range");
Range.RangeType =
StringSwitch<uint32_t>(*ElementText)
- .Case("CBV", llvm::to_underlying(dxbc::DescriptorRangeType::CBV))
- .Case("SRV", llvm::to_underlying(dxbc::DescriptorRangeType::SRV))
- .Case("UAV", llvm::to_underlying(dxbc::DescriptorRangeType::UAV))
- .Case("Sampler",
- llvm::to_underlying(dxbc::DescriptorRangeType::Sampler))
+ .Case("CBV", to_underlying(dxbc::DescriptorRangeType::CBV))
+ .Case("SRV", to_underlying(dxbc::DescriptorRangeType::SRV))
+ .Case("UAV", to_underlying(dxbc::DescriptorRangeType::UAV))
+ .Case("Sampler", to_underlying(dxbc::DescriptorRangeType::Sampler))
.Default(~0U);
if (Range.RangeType == ~0U)
- return reportError(Ctx, "Invalid Descriptor Range type: " + *ElementText);
+ return make_error<GenericRSMetadataError>("Invalid Descriptor Range type.",
+ RangeDescriptorNode);
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 1))
Range.NumDescriptors = *Val;
else
- return reportError(Ctx, "Invalid value for Number of Descriptor in Range");
+ return make_error<GenericRSMetadataError>("Number of Descriptor in Range",
+ RangeDescriptorNode);
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 2))
Range.BaseShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for BaseShaderRegister");
+ return make_error<InvalidRSMetadataValue>("BaseShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 3))
Range.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 4))
Range.OffsetInDescriptorsFromTableStart = *Val;
else
- return reportError(Ctx,
- "Invalid value for OffsetInDescriptorsFromTableStart");
+ return make_error<InvalidRSMetadataValue>(
+ "OffsetInDescriptorsFromTableStart");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 5))
Range.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for Descriptor Range Flags");
+ return make_error<InvalidRSMetadataValue>("Descriptor Range Flags");
Table.Ranges.push_back(Range);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseDescriptorTable(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *DescriptorTableNode) {
+Error MetadataParser::parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *DescriptorTableNode) {
const unsigned int NumOperands = DescriptorTableNode->getNumOperands();
if (NumOperands < 2)
- return reportError(Ctx, "Invalid format for Descriptor Table");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Table");
dxbc::RTS0::v1::RootParameterHeader Header;
if (std::optional<uint32_t> Val = extractMdIntValue(DescriptorTableNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
mcdxbc::DescriptorTable Table;
Header.ParameterType =
- llvm::to_underlying(dxbc::RootParameterType::DescriptorTable);
+ to_underlying(dxbc::RootParameterType::DescriptorTable);
for (unsigned int I = 2; I < NumOperands; I++) {
MDNode *Element = dyn_cast<MDNode>(DescriptorTableNode->getOperand(I));
if (Element == nullptr)
- return reportError(Ctx, "Missing Root Element Metadata Node.");
+ return make_error<GenericRSMetadataError>(
+ "Missing Root Element Metadata Node.", DescriptorTableNode);
- if (parseDescriptorRange(Ctx, Table, Element))
- return true;
+ if (auto Err = parseDescriptorRange(Table, Element))
+ return Err;
}
RSD.ParametersContainer.addParameter(Header, Table);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseStaticSampler(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *StaticSamplerNode) {
+Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *StaticSamplerNode) {
if (StaticSamplerNode->getNumOperands() != 14)
- return reportError(Ctx, "Invalid format for Static Sampler");
+ return make_error<InvalidRSMetadataFormat>("Static Sampler");
dxbc::RTS0::v1::StaticSampler Sampler;
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 1))
Sampler.Filter = *Val;
else
- return reportError(Ctx, "Invalid value for Filter");
+ return make_error<InvalidRSMetadataValue>("Filter");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 2))
Sampler.AddressU = *Val;
else
- return reportError(Ctx, "Invalid value for AddressU");
+ return make_error<InvalidRSMetadataValue>("AddressU");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 3))
Sampler.AddressV = *Val;
else
- return reportError(Ctx, "Invalid value for AddressV");
+ return make_error<InvalidRSMetadataValue>("AddressV");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 4))
Sampler.AddressW = *Val;
else
- return reportError(Ctx, "Invalid value for AddressW");
+ return make_error<InvalidRSMetadataValue>("AddressW");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 5))
Sampler.MipLODBias = *Val;
else
- return reportError(Ctx, "Invalid value for MipLODBias");
+ return make_error<InvalidRSMetadataValue>("MipLODBias");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 6))
Sampler.MaxAnisotropy = *Val;
else
- return reportError(Ctx, "Invalid value for MaxAnisotropy");
+ return make_error<InvalidRSMetadataValue>("MaxAnisotropy");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 7))
Sampler.ComparisonFunc = *Val;
else
- return reportError(Ctx, "Invalid value for ComparisonFunc ");
+ return make_error<InvalidRSMetadataValue>("ComparisonFunc");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 8))
Sampler.BorderColor = *Val;
else
- return reportError(Ctx, "Invalid value for ComparisonFunc ");
+    return make_error<InvalidRSMetadataValue>("BorderColor");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 9))
Sampler.MinLOD = *Val;
else
- return reportError(Ctx, "Invalid value for MinLOD");
+ return make_error<InvalidRSMetadataValue>("MinLOD");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 10))
Sampler.MaxLOD = *Val;
else
- return reportError(Ctx, "Invalid value for MaxLOD");
+ return make_error<InvalidRSMetadataValue>("MaxLOD");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 11))
Sampler.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 12))
Sampler.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 13))
Sampler.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
RSD.StaticSamplers.push_back(Sampler);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootSignatureElement(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *Element) {
+Error MetadataParser::parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *Element) {
std::optional<StringRef> ElementText = extractMdStringValue(Element, 0);
if (!ElementText.has_value())
- return reportError(Ctx, "Invalid format for Root Element");
+ return make_error<InvalidRSMetadataFormat>("Root Element");
RootSignatureElementKind ElementKind =
StringSwitch<RootSignatureElementKind>(*ElementText)
@@ -523,79 +506,109 @@ bool MetadataParser::parseRootSignatureElement(LLVMContext *Ctx,
switch (ElementKind) {
case RootSignatureElementKind::RootFlags:
- return parseRootFlags(Ctx, RSD, Element);
+ return parseRootFlags(RSD, Element);
case RootSignatureElementKind::RootConstants:
- return parseRootConstants(Ctx, RSD, Element);
+ return parseRootConstants(RSD, Element);
case RootSignatureElementKind::CBV:
case RootSignatureElementKind::SRV:
case RootSignatureElementKind::UAV:
- return parseRootDescriptors(Ctx, RSD, Element, ElementKind);
+ return parseRootDescriptors(RSD, Element, ElementKind);
case RootSignatureElementKind::DescriptorTable:
- return parseDescriptorTable(Ctx, RSD, Element);
+ return parseDescriptorTable(RSD, Element);
case RootSignatureElementKind::StaticSamplers:
- return parseStaticSampler(Ctx, RSD, Element);
+ return parseStaticSampler(RSD, Element);
case RootSignatureElementKind::Error:
- return reportError(Ctx, "Invalid Root Signature Element: " + *ElementText);
+ return make_error<GenericRSMetadataError>("Invalid Root Signature Element",
+ Element);
}
llvm_unreachable("Unhandled RootSignatureElementKind enum.");
}
-bool MetadataParser::validateRootSignature(
- LLVMContext *Ctx, const llvm::mcdxbc::RootSignatureDesc &RSD) {
- if (!llvm::hlsl::rootsig::verifyVersion(RSD.Version)) {
- return reportValueError(Ctx, "Version", RSD.Version);
+Error MetadataParser::validateRootSignature(
+ const mcdxbc::RootSignatureDesc &RSD) {
+ Error DeferredErrs = Error::success();
+ if (!hlsl::rootsig::verifyVersion(RSD.Version)) {
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "Version", RSD.Version));
}
- if (!llvm::hlsl::rootsig::verifyRootFlag(RSD.Flags)) {
- return reportValueError(Ctx, "RootFlags", RSD.Flags);
+ if (!hlsl::rootsig::verifyRootFlag(RSD.Flags)) {
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RootFlags", RSD.Flags));
}
for (const mcdxbc::RootParameterInfo &Info : RSD.ParametersContainer) {
if (!dxbc::isValidShaderVisibility(Info.Header.ShaderVisibility))
- return reportValueError(Ctx, "ShaderVisibility",
- Info.Header.ShaderVisibility);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderVisibility", Info.Header.ShaderVisibility));
assert(dxbc::isValidParameterType(Info.Header.ParameterType) &&
"Invalid value for ParameterType");
switch (Info.Header.ParameterType) {
- case llvm::to_underlying(dxbc::RootParameterType::CBV):
- case llvm::to_underlying(dxbc::RootParameterType::UAV):
- case llvm::to_underlying(dxbc::RootParameterType::SRV): {
+ case to_underlying(dxbc::RootParameterType::CBV):
+ case to_underlying(dxbc::RootParameterType::UAV):
+ case to_underlying(dxbc::RootParameterType::SRV): {
const dxbc::RTS0::v2::RootDescriptor &Descriptor =
RSD.ParametersContainer.getRootDescriptor(Info.Location);
- if (!llvm::hlsl::rootsig::verifyRegisterValue(Descriptor.ShaderRegister))
- return reportValueError(Ctx, "ShaderRegister",
- Descriptor.ShaderRegister);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Descriptor.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Descriptor.RegisterSpace);
+ if (!hlsl::rootsig::verifyRegisterValue(Descriptor.ShaderRegister))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderRegister", Descriptor.ShaderRegister));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Descriptor.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Descriptor.RegisterSpace));
if (RSD.Version > 1) {
- if (!llvm::hlsl::rootsig::verifyRootDescriptorFlag(RSD.Version,
- Descriptor.Flags))
- return reportValueError(Ctx, "RootDescriptorFlag", Descriptor.Flags);
+ if (!hlsl::rootsig::verifyRootDescriptorFlag(RSD.Version,
+ Descriptor.Flags))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RootDescriptorFlag", Descriptor.Flags));
}
break;
}
- case llvm::to_underlying(dxbc::RootParameterType::DescriptorTable): {
+ case to_underlying(dxbc::RootParameterType::DescriptorTable): {
const mcdxbc::DescriptorTable &Table =
RSD.ParametersContainer.getDescriptorTable(Info.Location);
for (const dxbc::RTS0::v2::DescriptorRange &Range : Table) {
- if (!llvm::hlsl::rootsig::verifyRangeType(Range.RangeType))
- return reportValueError(Ctx, "RangeType", Range.RangeType);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Range.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Range.RegisterSpace);
-
- if (!llvm::hlsl::rootsig::verifyNumDescriptors(Range.NumDescriptors))
- return reportValueError(Ctx, "NumDescriptors", Range.NumDescriptors);
-
- if (!llvm::hlsl::rootsig::verifyDescriptorRangeFlag(
+ if (!hlsl::rootsig::verifyRangeType(Range.RangeType))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RangeType", Range.RangeType));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Range.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Range.RegisterSpace));
+
+ if (!hlsl::rootsig::verifyNumDescriptors(Range.NumDescriptors))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "NumDescriptors", Range.NumDescriptors));
+
+ if (!hlsl::rootsig::verifyDescriptorRangeFlag(
RSD.Version, Range.RangeType, Range.Flags))
- return reportValueError(Ctx, "DescriptorFlag", Range.Flags);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "DescriptorFlag", Range.Flags));
}
break;
}
@@ -603,65 +616,108 @@ bool MetadataParser::validateRootSignature(
}
for (const dxbc::RTS0::v1::StaticSampler &Sampler : RSD.StaticSamplers) {
- if (!llvm::hlsl::rootsig::verifySamplerFilter(Sampler.Filter))
- return reportValueError(Ctx, "Filter", Sampler.Filter);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressU))
- return reportValueError(Ctx, "AddressU", Sampler.AddressU);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressV))
- return reportValueError(Ctx, "AddressV", Sampler.AddressV);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressW))
- return reportValueError(Ctx, "AddressW", Sampler.AddressW);
-
- if (!llvm::hlsl::rootsig::verifyMipLODBias(Sampler.MipLODBias))
- return reportValueError(Ctx, "MipLODBias", Sampler.MipLODBias);
-
- if (!llvm::hlsl::rootsig::verifyMaxAnisotropy(Sampler.MaxAnisotropy))
- return reportValueError(Ctx, "MaxAnisotropy", Sampler.MaxAnisotropy);
-
- if (!llvm::hlsl::rootsig::verifyComparisonFunc(Sampler.ComparisonFunc))
- return reportValueError(Ctx, "ComparisonFunc", Sampler.ComparisonFunc);
-
- if (!llvm::hlsl::rootsig::verifyBorderColor(Sampler.BorderColor))
- return reportValueError(Ctx, "BorderColor", Sampler.BorderColor);
-
- if (!llvm::hlsl::rootsig::verifyLOD(Sampler.MinLOD))
- return reportValueError(Ctx, "MinLOD", Sampler.MinLOD);
-
- if (!llvm::hlsl::rootsig::verifyLOD(Sampler.MaxLOD))
- return reportValueError(Ctx, "MaxLOD", Sampler.MaxLOD);
-
- if (!llvm::hlsl::rootsig::verifyRegisterValue(Sampler.ShaderRegister))
- return reportValueError(Ctx, "ShaderRegister", Sampler.ShaderRegister);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Sampler.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Sampler.RegisterSpace);
+ if (!hlsl::rootsig::verifySamplerFilter(Sampler.Filter))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "Filter", Sampler.Filter));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressU))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressU", Sampler.AddressU));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressV))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressV", Sampler.AddressV));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressW))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressW", Sampler.AddressW));
+
+ if (!hlsl::rootsig::verifyMipLODBias(Sampler.MipLODBias))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MipLODBias", Sampler.MipLODBias));
+
+ if (!hlsl::rootsig::verifyMaxAnisotropy(Sampler.MaxAnisotropy))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "MaxAnisotropy", Sampler.MaxAnisotropy));
+
+ if (!hlsl::rootsig::verifyComparisonFunc(Sampler.ComparisonFunc))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ComparisonFunc", Sampler.ComparisonFunc));
+
+ if (!hlsl::rootsig::verifyBorderColor(Sampler.BorderColor))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "BorderColor", Sampler.BorderColor));
+
+ if (!hlsl::rootsig::verifyLOD(Sampler.MinLOD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MinLOD", Sampler.MinLOD));
+
+ if (!hlsl::rootsig::verifyLOD(Sampler.MaxLOD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MaxLOD", Sampler.MaxLOD));
+
+ if (!hlsl::rootsig::verifyRegisterValue(Sampler.ShaderRegister))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderRegister", Sampler.ShaderRegister));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Sampler.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Sampler.RegisterSpace));
if (!dxbc::isValidShaderVisibility(Sampler.ShaderVisibility))
- return reportValueError(Ctx, "ShaderVisibility",
- Sampler.ShaderVisibility);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderVisibility", Sampler.ShaderVisibility));
}
- return false;
+ return DeferredErrs;
}
-bool MetadataParser::ParseRootSignature(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD) {
- bool HasError = false;
-
- // Loop through the Root Elements of the root signature.
+Expected<mcdxbc::RootSignatureDesc>
+MetadataParser::ParseRootSignature(uint32_t Version) {
+ Error DeferredErrs = Error::success();
+ mcdxbc::RootSignatureDesc RSD;
+ RSD.Version = Version;
for (const auto &Operand : Root->operands()) {
MDNode *Element = dyn_cast<MDNode>(Operand);
if (Element == nullptr)
- return reportError(Ctx, "Missing Root Element Metadata Node.");
+ return joinErrors(std::move(DeferredErrs),
+ make_error<GenericRSMetadataError>(
+ "Missing Root Element Metadata Node.", nullptr));
- HasError = HasError || parseRootSignatureElement(Ctx, RSD, Element) ||
- validateRootSignature(Ctx, RSD);
+ if (auto Err = parseRootSignatureElement(RSD, Element))
+ DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err));
}
- return HasError;
+ if (auto Err = validateRootSignature(RSD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err));
+
+ if (DeferredErrs)
+ return std::move(DeferredErrs);
+
+ return std::move(RSD);
}
} // namespace rootsig
} // namespace hlsl
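With the parser now returning Error/Expected instead of diagnosing through LLVMContext, callers unwrap the result and report the joined errors themselves. A minimal sketch, assuming a constructed MetadataParser named Parser and a known root signature Version (the wrapper function and its name are hypothetical):

#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool lowerRootSignature(hlsl::rootsig::MetadataParser &Parser,
                               uint32_t Version) {
  Expected<mcdxbc::RootSignatureDesc> MaybeRSD =
      Parser.ParseRootSignature(Version);
  if (!MaybeRSD) {
    // Parse and validation failures are joined with joinErrors, so visiting
    // them with handleAllErrors reports every problem, not just the first.
    handleAllErrors(MaybeRSD.takeError(), [](const ErrorInfoBase &EIB) {
      errs() << "root signature: " << EIB.message() << "\n";
    });
    return false;
  }
  mcdxbc::RootSignatureDesc RSD = std::move(*MaybeRSD);
  (void)RSD; // hand off to serialization
  return true;
}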
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 7928772..3aa4f7a 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1161,7 +1161,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitTargetKernel(
Builder.restoreIP(AllocaIP);
auto *KernelArgsPtr =
Builder.CreateAlloca(OpenMPIRBuilder::KernelArgs, nullptr, "kernel_args");
- Builder.restoreIP(Loc.IP);
+ updateToLocation(Loc);
for (unsigned I = 0, Size = KernelArgs.size(); I != Size; ++I) {
llvm::Value *Arg =
@@ -1189,7 +1189,6 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::emitKernelLaunch(
if (!updateToLocation(Loc))
return Loc.IP;
- Builder.restoreIP(Loc.IP);
// On top of the arrays that were filled up, the target offloading call
// takes as arguments the device id as well as the host pointer. The host
// pointer is used by the runtime library to identify the current target
@@ -5955,7 +5954,7 @@ OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
Builder.restoreIP(AllocaIP);
AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
ArgsBase->setAlignment(Align(8));
- Builder.restoreIP(Loc.IP);
+ updateToLocation(Loc);
// Store the index value with offset in depend vector.
for (unsigned I = 0; I < NumLoops; ++I) {
@@ -8081,7 +8080,7 @@ void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc,
".offload_ptrs");
AllocaInst *ArgSizes = Builder.CreateAlloca(
ArrI64Ty, /* ArraySize = */ nullptr, ".offload_sizes");
- Builder.restoreIP(Loc.IP);
+ updateToLocation(Loc);
MapperAllocas.ArgsBase = ArgsBase;
MapperAllocas.Args = Args;
MapperAllocas.ArgSizes = ArgSizes;
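The three hunks above swap Builder.restoreIP(Loc.IP) for updateToLocation(Loc): after temporarily moving to the alloca insertion point, updateToLocation restores both the caller's insertion point and its debug location. A sketch of the pattern as it would appear inside an OpenMPIRBuilder member (Ty, Loc, AllocaIP, and the variable names are placeholders):

// Allocate at the function-entry alloca point, then resume emitting at the
// caller's location with the caller's DebugLoc re-applied.
Builder.restoreIP(AllocaIP);
AllocaInst *Scratch = Builder.CreateAlloca(Ty, /*ArraySize=*/nullptr, "scratch");
updateToLocation(Loc); // restores Loc.IP and Loc.DL, unlike restoreIP alone
// ... instructions that use Scratch are emitted here ...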
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 73e79c0..0323b4d 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/LTO/LTO.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StableHashing.h"
@@ -742,18 +743,19 @@ Error LTO::add(std::unique_ptr<InputFile> Input,
Conf.VisibilityScheme = Config::ELF;
}
- const SymbolResolution *ResI = Res.begin();
- for (unsigned I = 0; I != Input->Mods.size(); ++I)
- if (Error Err = addModule(*Input, I, ResI, Res.end()))
+ ArrayRef<SymbolResolution> InputRes = Res;
+ for (unsigned I = 0; I != Input->Mods.size(); ++I) {
+ if (auto Err = addModule(*Input, InputRes, I, Res).moveInto(Res))
return Err;
+ }
- assert(ResI == Res.end());
+ assert(Res.empty());
return Error::success();
}
-Error LTO::addModule(InputFile &Input, unsigned ModI,
- const SymbolResolution *&ResI,
- const SymbolResolution *ResE) {
+Expected<ArrayRef<SymbolResolution>>
+LTO::addModule(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ unsigned ModI, ArrayRef<SymbolResolution> Res) {
Expected<BitcodeLTOInfo> LTOInfo = Input.Mods[ModI].getLTOInfo();
if (!LTOInfo)
return LTOInfo.takeError();
@@ -782,28 +784,32 @@ Error LTO::addModule(InputFile &Input, unsigned ModI,
bool IsThinLTO = LTOInfo->IsThinLTO && (LTOMode != LTOK_UnifiedRegular);
auto ModSyms = Input.module_symbols(ModI);
- addModuleToGlobalRes(ModSyms, {ResI, ResE},
+ addModuleToGlobalRes(ModSyms, Res,
IsThinLTO ? ThinLTO.ModuleMap.size() + 1 : 0,
LTOInfo->HasSummary);
if (IsThinLTO)
- return addThinLTO(BM, ModSyms, ResI, ResE);
+ return addThinLTO(BM, ModSyms, Res);
RegularLTO.EmptyCombinedModule = false;
- Expected<RegularLTOState::AddedModule> ModOrErr =
- addRegularLTO(BM, ModSyms, ResI, ResE);
+ auto ModOrErr = addRegularLTO(Input, InputRes, BM, ModSyms, Res);
if (!ModOrErr)
return ModOrErr.takeError();
+ Res = ModOrErr->second;
- if (!LTOInfo->HasSummary)
- return linkRegularLTO(std::move(*ModOrErr), /*LivenessFromIndex=*/false);
+ if (!LTOInfo->HasSummary) {
+ if (Error Err = linkRegularLTO(std::move(ModOrErr->first),
+ /*LivenessFromIndex=*/false))
+ return Err;
+ return Res;
+ }
// Regular LTO module summaries are added to a dummy module that represents
// the combined regular LTO module.
if (Error Err = BM.readSummary(ThinLTO.CombinedIndex, ""))
return Err;
- RegularLTO.ModsWithSummaries.push_back(std::move(*ModOrErr));
- return Error::success();
+ RegularLTO.ModsWithSummaries.push_back(std::move(ModOrErr->first));
+ return Res;
}
// Checks whether the given global value is in a non-prevailing comdat
@@ -839,10 +845,11 @@ handleNonPrevailingComdat(GlobalValue &GV,
// Add a regular LTO object to the link.
// The resulting module needs to be linked into the combined LTO module with
// linkRegularLTO.
-Expected<LTO::RegularLTOState::AddedModule>
-LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
- const SymbolResolution *&ResI,
- const SymbolResolution *ResE) {
+Expected<
+ std::pair<LTO::RegularLTOState::AddedModule, ArrayRef<SymbolResolution>>>
+LTO::addRegularLTO(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res) {
RegularLTOState::AddedModule Mod;
Expected<std::unique_ptr<Module>> MOrErr =
BM.getLazyModule(RegularLTO.Ctx, /*ShouldLazyLoadMetadata*/ true,
@@ -855,13 +862,34 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
if (Error Err = M.materializeMetadata())
return std::move(Err);
- // If cfi.functions is present and we are in regular LTO mode, LowerTypeTests
- // will rename local functions in the merged module as "<function name>.1".
- // This causes linking errors, since other parts of the module expect the
- // original function name.
- if (LTOMode == LTOK_UnifiedRegular)
+ if (LTOMode == LTOK_UnifiedRegular) {
+    // cfi.functions metadata is intended to be used with ThinLTO and may
+    // trigger invalid IR transformations if it is present when doing regular
+    // LTO, so delete it.
if (NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions"))
M.eraseNamedMetadata(CfiFunctionsMD);
+ } else if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
+ // Delete aliases entries for non-prevailing symbols on the ThinLTO side of
+ // this input file.
+ DenseSet<StringRef> Prevailing;
+ for (auto [I, R] : zip(Input.symbols(), InputRes))
+ if (R.Prevailing && !I.getIRName().empty())
+ Prevailing.insert(I.getIRName());
+ std::vector<MDNode *> AliasGroups;
+ for (MDNode *AliasGroup : AliasesMD->operands()) {
+ std::vector<Metadata *> Aliases;
+ for (Metadata *Alias : AliasGroup->operands()) {
+ if (isa<MDString>(Alias) &&
+ Prevailing.count(cast<MDString>(Alias)->getString()))
+ Aliases.push_back(Alias);
+ }
+ if (Aliases.size() > 1)
+ AliasGroups.push_back(MDTuple::get(RegularLTO.Ctx, Aliases));
+ }
+ AliasesMD->clearOperands();
+ for (MDNode *G : AliasGroups)
+ AliasesMD->addOperand(G);
+ }
UpgradeDebugInfo(M);
@@ -899,22 +927,22 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
std::set<const Comdat *> NonPrevailingComdats;
SmallSet<StringRef, 2> NonPrevailingAsmSymbols;
for (const InputFile::Symbol &Sym : Syms) {
- assert(ResI != ResE);
- SymbolResolution Res = *ResI++;
+ assert(!Res.empty());
+ const SymbolResolution &R = Res.consume_front();
assert(MsymI != MsymE);
ModuleSymbolTable::Symbol Msym = *MsymI++;
Skip();
if (GlobalValue *GV = dyn_cast_if_present<GlobalValue *>(Msym)) {
- if (Res.Prevailing) {
+ if (R.Prevailing) {
if (Sym.isUndefined())
continue;
Mod.Keep.push_back(GV);
// For symbols re-defined with linker -wrap and -defsym options,
// set the linkage to weak to inhibit IPO. The linkage will be
// restored by the linker.
- if (Res.LinkerRedefined)
+ if (R.LinkerRedefined)
GV->setLinkage(GlobalValue::WeakAnyLinkage);
GlobalValue::LinkageTypes OriginalLinkage = GV->getLinkage();
@@ -938,7 +966,7 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
}
// Set the 'local' flag based on the linker resolution for this symbol.
- if (Res.FinalDefinitionInLinkageUnit) {
+ if (R.FinalDefinitionInLinkageUnit) {
GV->setDSOLocal(true);
if (GV->hasDLLImportStorageClass())
GV->setDLLStorageClass(GlobalValue::DLLStorageClassTypes::
@@ -947,7 +975,7 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
} else if (auto *AS =
dyn_cast_if_present<ModuleSymbolTable::AsmSymbol *>(Msym)) {
// Collect non-prevailing symbols.
- if (!Res.Prevailing)
+ if (!R.Prevailing)
NonPrevailingAsmSymbols.insert(AS->first);
} else {
llvm_unreachable("unknown symbol type");
@@ -965,7 +993,7 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
CommonRes.Alignment =
std::max(Align(SymAlignValue), CommonRes.Alignment);
}
- CommonRes.Prevailing |= Res.Prevailing;
+ CommonRes.Prevailing |= R.Prevailing;
}
}
@@ -991,7 +1019,7 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
}
assert(MsymI == MsymE);
- return std::move(Mod);
+ return std::make_pair(std::move(Mod), Res);
}
Error LTO::linkRegularLTO(RegularLTOState::AddedModule Mod,
@@ -1032,19 +1060,19 @@ Error LTO::linkRegularLTO(RegularLTOState::AddedModule Mod,
}
// Add a ThinLTO module to the link.
-Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
- const SymbolResolution *&ResI,
- const SymbolResolution *ResE) {
- const SymbolResolution *ResITmp = ResI;
+Expected<ArrayRef<SymbolResolution>>
+LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res) {
+ ArrayRef<SymbolResolution> ResTmp = Res;
for (const InputFile::Symbol &Sym : Syms) {
- assert(ResITmp != ResE);
- SymbolResolution Res = *ResITmp++;
+ assert(!ResTmp.empty());
+ const SymbolResolution &R = ResTmp.consume_front();
if (!Sym.getIRName().empty()) {
auto GUID = GlobalValue::getGUIDAssumingExternalLinkage(
GlobalValue::getGlobalIdentifier(Sym.getIRName(),
GlobalValue::ExternalLinkage, ""));
- if (Res.Prevailing)
+ if (R.Prevailing)
ThinLTO.PrevailingModuleForGUID[GUID] = BM.getModuleIdentifier();
}
}
@@ -1059,14 +1087,14 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
LLVM_DEBUG(dbgs() << "Module " << BM.getModuleIdentifier() << "\n");
for (const InputFile::Symbol &Sym : Syms) {
- assert(ResI != ResE);
- SymbolResolution Res = *ResI++;
+ assert(!Res.empty());
+ const SymbolResolution &R = Res.consume_front();
if (!Sym.getIRName().empty()) {
auto GUID = GlobalValue::getGUIDAssumingExternalLinkage(
GlobalValue::getGlobalIdentifier(Sym.getIRName(),
GlobalValue::ExternalLinkage, ""));
- if (Res.Prevailing) {
+ if (R.Prevailing) {
assert(ThinLTO.PrevailingModuleForGUID[GUID] ==
BM.getModuleIdentifier());
@@ -1074,7 +1102,7 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
// switch the linkage to `weak` to prevent IPOs from happening.
// Find the summary in the module for this very GV and record the new
// linkage so that we can switch it when we import the GV.
- if (Res.LinkerRedefined)
+ if (R.LinkerRedefined)
if (auto S = ThinLTO.CombinedIndex.findSummaryInModule(
GUID, BM.getModuleIdentifier()))
S->setLinkage(GlobalValue::WeakAnyLinkage);
@@ -1082,7 +1110,7 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
// If the linker resolved the symbol to a local definition then mark it
// as local in the summary for the module we are adding.
- if (Res.FinalDefinitionInLinkageUnit) {
+ if (R.FinalDefinitionInLinkageUnit) {
if (auto S = ThinLTO.CombinedIndex.findSummaryInModule(
GUID, BM.getModuleIdentifier())) {
S->setDSOLocal(true);
@@ -1110,7 +1138,7 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
}
}
- return Error::success();
+ return Res;
}
unsigned LTO::getMaxTasks() const {
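The rewrite threads ArrayRef<SymbolResolution> through addModule/addRegularLTO/addThinLTO instead of a pair of raw pointers: each stage peels resolutions off the front and returns the unconsumed tail. A schematic of that contract, assuming the ArrayRef::consume_front() helper this patch relies on (consumeAll and the int symbol placeholders are hypothetical):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/LTO/LTO.h"
#include <cassert>

using namespace llvm;
using namespace llvm::lto;

static void consumeAll(ArrayRef<SymbolResolution> Res,
                       ArrayRef<ArrayRef<int>> SymbolsPerModule) {
  for (ArrayRef<int> ModSyms : SymbolsPerModule) {
    for (int Sym : ModSyms) {
      (void)Sym;
      assert(!Res.empty() && "fewer resolutions than symbols");
      const SymbolResolution &R = Res.consume_front(); // drop and return front
      (void)R; // apply R to Sym
    }
  }
  assert(Res.empty() && "every resolution must be consumed exactly once");
}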
diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp
index 0069d12..393eed1 100644
--- a/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -537,6 +537,8 @@ void MCObjectFileInfo::initELFMCObjectFileInfo(const Triple &T, bool Large) {
EHFrameSection =
Ctx->getELFSection(".eh_frame", EHSectionType, EHSectionFlags);
+ CallGraphSection = Ctx->getELFSection(".callgraph", ELF::SHT_PROGBITS, 0);
+
StackSizesSection = Ctx->getELFSection(".stack_sizes", ELF::SHT_PROGBITS, 0);
PseudoProbeSection = Ctx->getELFSection(".pseudo_probe", DebugSecType, 0);
@@ -1121,6 +1123,24 @@ MCSection *MCObjectFileInfo::getDwarfComdatSection(const char *Name,
}
MCSection *
+MCObjectFileInfo::getCallGraphSection(const MCSection &TextSec) const {
+ if (Ctx->getObjectFileType() != MCContext::IsELF)
+ return CallGraphSection;
+
+ const MCSectionELF &ElfSec = static_cast<const MCSectionELF &>(TextSec);
+ unsigned Flags = ELF::SHF_LINK_ORDER;
+ StringRef GroupName;
+ if (const MCSymbol *Group = ElfSec.getGroup()) {
+ GroupName = Group->getName();
+ Flags |= ELF::SHF_GROUP;
+ }
+
+ return Ctx->getELFSection(".callgraph", ELF::SHT_PROGBITS, Flags, 0,
+ GroupName, true, ElfSec.getUniqueID(),
+ cast<MCSymbolELF>(TextSec.getBeginSymbol()));
+}
+
+MCSection *
MCObjectFileInfo::getStackSizesSection(const MCSection &TextSec) const {
if ((Ctx->getObjectFileType() != MCContext::IsELF) ||
Ctx->getTargetTriple().isPS4())
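A minimal sketch of how an emitter might use the new hook: the .callgraph section is created per text section with SHF_LINK_ORDER (and SHF_GROUP when the text section lives in a comdat), so it is looked up while that text section is current. OutContext, OutStreamer, and the emitted payload are assumptions borrowed from the usual AsmPrinter environment:

const MCObjectFileInfo &MOFI = *OutContext.getObjectFileInfo();
MCSection *Text = OutStreamer->getCurrentSectionOnly();
MCSection *CG = MOFI.getCallGraphSection(*Text);
OutStreamer->pushSection();
OutStreamer->switchSection(CG);
// ... emit per-function call graph metadata associated with Text ...
OutStreamer->popSection();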
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index e82393a..e277143 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -46,83 +46,27 @@ MCAssembler *MCObjectStreamer::getAssemblerPtr() {
return nullptr;
}
-constexpr size_t FragBlockSize = 16384;
-// Ensure the new fragment can at least store a few bytes.
-constexpr size_t NewFragHeadroom = 8;
-
-static_assert(NewFragHeadroom >= alignof(MCFragment));
-static_assert(FragBlockSize >= sizeof(MCFragment) + NewFragHeadroom);
-
-MCFragment *MCObjectStreamer::allocFragSpace(size_t Headroom) {
- auto Size = std::max(FragBlockSize, sizeof(MCFragment) + Headroom);
- FragSpace = Size - sizeof(MCFragment);
- auto Chunk = std::unique_ptr<char[]>(new char[Size]);
- auto *F = reinterpret_cast<MCFragment *>(Chunk.get());
- FragStorage.push_back(std::move(Chunk));
- return F;
-}
-
void MCObjectStreamer::newFragment() {
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
- addFragment(F);
-}
-
-void MCObjectStreamer::ensureHeadroom(size_t Headroom) {
- if (Headroom <= FragSpace)
- return;
- auto *F = allocFragSpace(Headroom);
- new (F) MCFragment();
- addFragment(F);
+ addFragment(getContext().allocFragment<MCFragment>());
}
-void MCObjectStreamer::insert(MCFragment *Frag) {
- assert(Frag->getKind() != MCFragment::FT_Data &&
+void MCObjectStreamer::insert(MCFragment *F) {
+ assert(F->getKind() != MCFragment::FT_Data &&
"F should have a variable-size tail");
- // Frag is not connected to FragSpace. Before modifying CurFrag with
- // addFragment(Frag), allocate an empty fragment to maintain FragSpace
- // connectivity, potentially reusing CurFrag's associated space.
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
-
- addFragment(Frag);
addFragment(F);
+ newFragment();
}
void MCObjectStreamer::appendContents(ArrayRef<char> Contents) {
- ensureHeadroom(Contents.size());
- assert(FragSpace >= Contents.size());
- llvm::copy(Contents, getCurFragEnd());
- CurFrag->FixedSize += Contents.size();
- FragSpace -= Contents.size();
+ CurFrag->appendContents(Contents);
}
void MCObjectStreamer::appendContents(size_t Num, char Elt) {
- ensureHeadroom(Num);
- MutableArrayRef<char> Data(getCurFragEnd(), Num);
- llvm::fill(Data, Elt);
- CurFrag->FixedSize += Num;
- FragSpace -= Num;
+ CurFrag->appendContents(Num, Elt);
}
void MCObjectStreamer::addFixup(const MCExpr *Value, MCFixupKind Kind) {
- CurFrag->addFixup(MCFixup::create(getCurFragSize(), Value, Kind));
+ CurFrag->addFixup(MCFixup::create(CurFrag->getFixedSize(), Value, Kind));
}
// As a compile-time optimization, avoid allocating and evaluating an MCExpr
@@ -171,8 +115,6 @@ void MCObjectStreamer::reset() {
}
EmitEHFrame = true;
EmitDebugFrame = false;
- FragStorage.clear();
- FragSpace = 0;
MCStreamer::reset();
}
@@ -201,6 +143,7 @@ void MCObjectStreamer::emitCFISections(bool EH, bool Debug, bool SFrame) {
void MCObjectStreamer::emitValueImpl(const MCExpr *Value, unsigned Size,
SMLoc Loc) {
MCStreamer::emitValueImpl(Value, Size, Loc);
+ MCFragment *DF = getCurrentFragment();
MCDwarfLineEntry::make(this, getCurrentSectionOnly());
@@ -215,9 +158,9 @@ void MCObjectStreamer::emitValueImpl(const MCExpr *Value, unsigned Size,
emitIntValue(AbsValue, Size);
return;
}
- ensureHeadroom(Size);
- addFixup(Value, MCFixup::getDataKindForSize(Size));
- appendContents(Size, 0);
+ DF->addFixup(MCFixup::create(DF->getContents().size(), Value,
+ MCFixup::getDataKindForSize(Size)));
+ DF->appendContents(Size, 0);
}
MCSymbol *MCObjectStreamer::emitCFILabel() {
@@ -251,7 +194,7 @@ void MCObjectStreamer::emitLabel(MCSymbol *Symbol, SMLoc Loc) {
// section.
MCFragment *F = CurFrag;
Symbol->setFragment(F);
- Symbol->setOffset(F->getFixedSize());
+ Symbol->setOffset(F->getContents().size());
emitPendingAssignments(Symbol);
}
@@ -317,21 +260,6 @@ void MCObjectStreamer::changeSection(MCSection *Section, uint32_t Subsection) {
F0 = CurFrag;
}
- // To maintain connectivity between CurFrag and FragSpace when CurFrag is
- // modified, allocate an empty fragment and append it to the fragment list.
- // (Subsections[I].second.Tail is not connected to FragSpace.)
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
- F->setParent(Section);
-
auto &Subsections = Section->Subsections;
size_t I = 0, E = Subsections.size();
while (I != E && Subsections[I].first < Subsection)
@@ -339,16 +267,13 @@ void MCObjectStreamer::changeSection(MCSection *Section, uint32_t Subsection) {
// If the subsection number is not in the sorted Subsections list, create a
// new fragment list.
if (I == E || Subsections[I].first != Subsection) {
+ auto *F = getContext().allocFragment<MCFragment>();
+ F->setParent(Section);
Subsections.insert(Subsections.begin() + I,
{Subsection, MCSection::FragList{F, F}});
- Section->CurFragList = &Subsections[I].second;
- CurFrag = F;
- } else {
- Section->CurFragList = &Subsections[I].second;
- CurFrag = Subsections[I].second.Tail;
- // Ensure CurFrag is associated with FragSpace.
- addFragment(F);
}
+ Section->CurFragList = &Subsections[I].second;
+ CurFrag = Section->CurFragList->Tail;
// Define the section symbol at subsection 0's initial fragment if required.
if (!NewSec)
@@ -419,15 +344,11 @@ void MCObjectStreamer::emitInstToData(const MCInst &Inst,
MCFragment *F = getCurrentFragment();
// Append the instruction to the data fragment.
- size_t CodeOffset = getCurFragSize();
- SmallString<16> Content;
+ size_t CodeOffset = F->getContents().size();
SmallVector<MCFixup, 1> Fixups;
- getAssembler().getEmitter().encodeInstruction(Inst, Content, Fixups, STI);
- appendContents(Content);
- if (CurFrag != F) {
- F = CurFrag;
- CodeOffset = 0;
- }
+ getAssembler().getEmitter().encodeInstruction(
+ Inst, F->getContentsForAppending(), Fixups, STI);
+ F->doneAppending();
F->setHasInstructions(STI);
if (Fixups.empty())
diff --git a/llvm/lib/MC/MCWin64EH.cpp b/llvm/lib/MC/MCWin64EH.cpp
index a87648a..72a8dd7 100644
--- a/llvm/lib/MC/MCWin64EH.cpp
+++ b/llvm/lib/MC/MCWin64EH.cpp
@@ -318,9 +318,6 @@ static void EmitUnwindInfo(MCStreamer &streamer, WinEH::FrameInfo *info) {
// Emit the epilog instructions.
if (EnableUnwindV2) {
- // Ensure the fixups and appended content apply to the same fragment.
- OS->ensureHeadroom(info->EpilogMap.size() * 2);
-
bool IsLast = true;
for (const auto &Epilog : llvm::reverse(info->EpilogMap)) {
if (IsLast) {
diff --git a/llvm/lib/MC/MCWinCOFFStreamer.cpp b/llvm/lib/MC/MCWinCOFFStreamer.cpp
index 8be5054..1ffe25c 100644
--- a/llvm/lib/MC/MCWinCOFFStreamer.cpp
+++ b/llvm/lib/MC/MCWinCOFFStreamer.cpp
@@ -280,7 +280,6 @@ void MCWinCOFFStreamer::emitCOFFSymbolIndex(MCSymbol const *Symbol) {
void MCWinCOFFStreamer::emitCOFFSectionIndex(const MCSymbol *Symbol) {
visitUsedSymbol(*Symbol);
const MCSymbolRefExpr *SRE = MCSymbolRefExpr::create(Symbol, getContext());
- ensureHeadroom(2);
addFixup(SRE, FK_SecRel_2);
appendContents(2, 0);
}
@@ -294,7 +293,6 @@ void MCWinCOFFStreamer::emitCOFFSecRel32(const MCSymbol *Symbol,
if (Offset)
MCE = MCBinaryExpr::createAdd(
MCE, MCConstantExpr::create(Offset, getContext()), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_SecRel_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -310,7 +308,6 @@ void MCWinCOFFStreamer::emitCOFFImgRel32(const MCSymbol *Symbol,
if (Offset)
MCE = MCBinaryExpr::createAdd(
MCE, MCConstantExpr::create(Offset, getContext()), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -321,7 +318,6 @@ void MCWinCOFFStreamer::emitCOFFSecNumber(MCSymbol const *Symbol) {
// Create Symbol for section number.
const MCExpr *MCE = MCCOFFSectionNumberTargetExpr::create(
*Symbol, this->getWriter(), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -332,7 +328,6 @@ void MCWinCOFFStreamer::emitCOFFSecOffset(MCSymbol const *Symbol) {
// Create Symbol for section offset.
const MCExpr *MCE =
MCCOFFSectionOffsetTargetExpr::create(*Symbol, getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
diff --git a/llvm/lib/ObjCopy/COFF/COFFReader.cpp b/llvm/lib/ObjCopy/COFF/COFFReader.cpp
index 62a71d4..9b55f76 100644
--- a/llvm/lib/ObjCopy/COFF/COFFReader.cpp
+++ b/llvm/lib/ObjCopy/COFF/COFFReader.cpp
@@ -135,7 +135,7 @@ Error COFFReader::readSymbols(Object &Obj, bool IsBigObj) const {
// it is, find the target section unique id.
const coff_aux_section_definition *SD = SymRef.getSectionDefinition();
const coff_aux_weak_external *WE = SymRef.getWeakExternal();
- if (SD && SD->Selection == IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
+ if (SD && SD->Selection == IMAGE_COMDAT_SELECT_ASSOCIATIVE && !Obj.IsPE) {
int32_t Index = SD->getNumber(IsBigObj);
if (Index <= 0 || static_cast<uint32_t>(Index - 1) >= Sections.size())
return createStringError(object_error::parse_failed,
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 0919c6a..aff047c 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -688,11 +688,20 @@ StringRef ELFObjectFileBase::getNVPTXCPUName() const {
case ELF::EF_CUDA_SM100:
return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_100a"
: "sm_100";
+ case ELF::EF_CUDA_SM101:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_101a"
+ : "sm_101";
+ case ELF::EF_CUDA_SM103:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_103a"
+ : "sm_103";
// Rubin architecture.
case ELF::EF_CUDA_SM120:
return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_120a"
: "sm_120";
+ case ELF::EF_CUDA_SM121:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_121a"
+ : "sm_121";
default:
llvm_unreachable("Unknown EF_CUDA_SM value");
}
diff --git a/llvm/lib/Object/SFrameParser.cpp b/llvm/lib/Object/SFrameParser.cpp
index 2d74d1d..5863490 100644
--- a/llvm/lib/Object/SFrameParser.cpp
+++ b/llvm/lib/Object/SFrameParser.cpp
@@ -10,27 +10,41 @@
#include "llvm/BinaryFormat/SFrame.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MathExtras.h"
using namespace llvm;
using namespace llvm::object;
-template <typename T>
-static Expected<const T &> getDataSliceAs(ArrayRef<uint8_t> Data,
- uint64_t Offset) {
- static_assert(std::is_trivial_v<T>);
- if (Data.size() < Offset + sizeof(T)) {
+static Expected<ArrayRef<uint8_t>>
+getDataSlice(ArrayRef<uint8_t> Data, uint64_t Offset, uint64_t Size) {
+ uint64_t End = SaturatingAdd(Offset, Size);
+ // Data.size() cannot be UINT64_MAX, as it would occupy the whole address
+ // space.
+ if (End > Data.size()) {
return createStringError(
formatv("unexpected end of data at offset {0:x} while reading [{1:x}, "
"{2:x})",
- Data.size(), Offset, Offset + sizeof(T))
+ Data.size(), Offset, End)
.str(),
object_error::unexpected_eof);
}
- return *reinterpret_cast<const T *>(Data.data() + Offset);
+ return Data.slice(Offset, Size);
+}
+
+template <typename T>
+static Expected<const T &> getDataSliceAs(ArrayRef<uint8_t> Data,
+ uint64_t Offset) {
+ static_assert(std::is_trivial_v<T>);
+ Expected<ArrayRef<uint8_t>> Slice = getDataSlice(Data, Offset, sizeof(T));
+ if (!Slice)
+ return Slice.takeError();
+
+ return *reinterpret_cast<const T *>(Slice->data());
}
template <endianness E>
-Expected<SFrameParser<E>> SFrameParser<E>::create(ArrayRef<uint8_t> Contents) {
+Expected<SFrameParser<E>> SFrameParser<E>::create(ArrayRef<uint8_t> Contents,
+ uint64_t SectionAddress) {
Expected<const sframe::Preamble<E> &> Preamble =
getDataSliceAs<sframe::Preamble<E>>(Contents, 0);
if (!Preamble)
@@ -48,8 +62,44 @@ Expected<SFrameParser<E>> SFrameParser<E>::create(ArrayRef<uint8_t> Contents) {
getDataSliceAs<sframe::Header<E>>(Contents, 0);
if (!Header)
return Header.takeError();
- return SFrameParser(Contents, *Header);
+ return SFrameParser(Contents, SectionAddress, *Header);
+}
+
+template <endianness E>
+Expected<ArrayRef<uint8_t>> SFrameParser<E>::getAuxHeader() const {
+ return getDataSlice(Data, sizeof(Header), Header.AuxHdrLen);
+}
+
+template <endianness E>
+Expected<ArrayRef<sframe::FuncDescEntry<E>>> SFrameParser<E>::fdes() const {
+ Expected<ArrayRef<uint8_t>> Slice = getDataSlice(
+ Data, getFDEBase(), Header.NumFDEs * sizeof(sframe::FuncDescEntry<E>));
+ if (!Slice)
+ return Slice.takeError();
+ return ArrayRef(
+ reinterpret_cast<const sframe::FuncDescEntry<E> *>(Slice->data()),
+ Header.NumFDEs);
+}
+
+template <endianness E>
+uint64_t SFrameParser<E>::getAbsoluteStartAddress(
+ typename FDERange::iterator FDE) const {
+ uint64_t Result = SectionAddress + FDE->StartAddress;
+
+ if ((getPreamble().Flags.value() & sframe::Flags::FDEFuncStartPCRel) ==
+ sframe::Flags::FDEFuncStartPCRel) {
+ uintptr_t DataPtr = reinterpret_cast<uintptr_t>(Data.data());
+ uintptr_t FDEPtr = reinterpret_cast<uintptr_t>(&*FDE);
+
+ assert(DataPtr <= FDEPtr && FDEPtr < DataPtr + Data.size() &&
+ "Iterator does not belong to this object!");
+
+ Result += FDEPtr - DataPtr;
+ }
+
+ return Result;
}
-template class llvm::object::SFrameParser<endianness::big>;
-template class llvm::object::SFrameParser<endianness::little>;
+template class LLVM_EXPORT_TEMPLATE llvm::object::SFrameParser<endianness::big>;
+template class LLVM_EXPORT_TEMPLATE
+ llvm::object::SFrameParser<endianness::little>;
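
The new getDataSlice helper computes the slice end with SaturatingAdd, so an (Offset, Size) pair that would wrap around can never sneak past the bounds check. A minimal stand-alone sketch of the same idiom, using hypothetical names rather than the LLVM API:

    #include <cstdint>
    #include <limits>
    #include <optional>
    #include <vector>

    // Bounds-checked slice: saturate the end offset so Offset + Size cannot
    // wrap past UINT64_MAX and alias back into the buffer.
    static std::optional<const uint8_t *>
    sliceChecked(const std::vector<uint8_t> &Data, uint64_t Offset, uint64_t Size) {
      uint64_t End = (Size > std::numeric_limits<uint64_t>::max() - Offset)
                         ? std::numeric_limits<uint64_t>::max()
                         : Offset + Size;
      if (End > Data.size()) // rejects out-of-range and overflowing requests alike
        return std::nullopt;
      return Data.data() + Offset;
    }
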
diff --git a/llvm/lib/ObjectYAML/ELFEmitter.cpp b/llvm/lib/ObjectYAML/ELFEmitter.cpp
index 6de87a8..bc5c68d 100644
--- a/llvm/lib/ObjectYAML/ELFEmitter.cpp
+++ b/llvm/lib/ObjectYAML/ELFEmitter.cpp
@@ -481,7 +481,11 @@ void ELFState<ELFT>::writeELFHeader(raw_ostream &OS) {
Header.e_version = EV_CURRENT;
Header.e_entry = Doc.Header.Entry;
- Header.e_flags = Doc.Header.Flags;
+ if (Doc.Header.Flags)
+ Header.e_flags = *Doc.Header.Flags;
+ else
+ Header.e_flags = 0;
+
Header.e_ehsize = sizeof(Elf_Ehdr);
if (Doc.Header.EPhOff)
diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp
index 7fcabb68..c27339d 100644
--- a/llvm/lib/ObjectYAML/ELFYAML.cpp
+++ b/llvm/lib/ObjectYAML/ELFYAML.cpp
@@ -1160,7 +1160,7 @@ void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO,
IO.mapOptional("ABIVersion", FileHdr.ABIVersion, Hex8(0));
IO.mapRequired("Type", FileHdr.Type);
IO.mapOptional("Machine", FileHdr.Machine);
- IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0));
+ IO.mapOptional("Flags", FileHdr.Flags);
IO.mapOptional("Entry", FileHdr.Entry, Hex64(0));
IO.mapOptional("SectionHeaderStringTable", FileHdr.SectionHeaderStringTable);
diff --git a/llvm/lib/ProfileData/MemProfReader.cpp b/llvm/lib/ProfileData/MemProfReader.cpp
index 235b134..3fc0dbf 100644
--- a/llvm/lib/ProfileData/MemProfReader.cpp
+++ b/llvm/lib/ProfileData/MemProfReader.cpp
@@ -135,7 +135,7 @@ readMemInfoBlocksV3(const char *Ptr) {
}
llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
-readMemInfoBlocksV4(const char *Ptr) {
+readMemInfoBlocksCommon(const char *Ptr, bool IsHistogramEncoded = false) {
using namespace support;
const uint64_t NumItemsToRead =
@@ -145,27 +145,74 @@ readMemInfoBlocksV4(const char *Ptr) {
for (uint64_t I = 0; I < NumItemsToRead; I++) {
const uint64_t Id =
endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
- // We cheat a bit here and remove the const from cast to set the
- // Histogram Pointer to newly allocated buffer.
- MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
- // Only increment by size of MIB since readNext implicitly increments.
- Ptr += sizeof(MemInfoBlock);
+ MemInfoBlock MIB;
+#define READ_MIB_FIELD(FIELD) \
+ MIB.FIELD = endian::readNext<decltype(MIB.FIELD), llvm::endianness::little, \
+ unaligned>(Ptr)
+
+ READ_MIB_FIELD(AllocCount);
+ READ_MIB_FIELD(TotalAccessCount);
+ READ_MIB_FIELD(MinAccessCount);
+ READ_MIB_FIELD(MaxAccessCount);
+ READ_MIB_FIELD(TotalSize);
+ READ_MIB_FIELD(MinSize);
+ READ_MIB_FIELD(MaxSize);
+ READ_MIB_FIELD(AllocTimestamp);
+ READ_MIB_FIELD(DeallocTimestamp);
+ READ_MIB_FIELD(TotalLifetime);
+ READ_MIB_FIELD(MinLifetime);
+ READ_MIB_FIELD(MaxLifetime);
+ READ_MIB_FIELD(AllocCpuId);
+ READ_MIB_FIELD(DeallocCpuId);
+ READ_MIB_FIELD(NumMigratedCpu);
+ READ_MIB_FIELD(NumLifetimeOverlaps);
+ READ_MIB_FIELD(NumSameAllocCpu);
+ READ_MIB_FIELD(NumSameDeallocCpu);
+ READ_MIB_FIELD(DataTypeId);
+ READ_MIB_FIELD(TotalAccessDensity);
+ READ_MIB_FIELD(MinAccessDensity);
+ READ_MIB_FIELD(MaxAccessDensity);
+ READ_MIB_FIELD(TotalLifetimeAccessDensity);
+ READ_MIB_FIELD(MinLifetimeAccessDensity);
+ READ_MIB_FIELD(MaxLifetimeAccessDensity);
+ READ_MIB_FIELD(AccessHistogramSize);
+ READ_MIB_FIELD(AccessHistogram);
+#undef READ_MIB_FIELD
if (MIB.AccessHistogramSize > 0) {
+ // The in-memory representation uses uint64_t for histogram entries.
MIB.AccessHistogram =
(uintptr_t)malloc(MIB.AccessHistogramSize * sizeof(uint64_t));
- }
-
- for (uint64_t J = 0; J < MIB.AccessHistogramSize; J++) {
- ((uint64_t *)MIB.AccessHistogram)[J] =
- endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
+ for (uint64_t J = 0; J < MIB.AccessHistogramSize; J++) {
+ if (!IsHistogramEncoded) {
+ ((uint64_t *)MIB.AccessHistogram)[J] =
+ endian::readNext<uint64_t, llvm::endianness::little, unaligned>(
+ Ptr);
+ } else {
+ // The encoded on-disk format (V5 onwards) uses uint16_t.
+ const uint16_t Val =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(
+ Ptr);
+ ((uint64_t *)MIB.AccessHistogram)[J] = decodeHistogramCount(Val);
+ }
+ }
}
Items.push_back({Id, MIB});
}
return Items;
}
+llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
+readMemInfoBlocksV4(const char *Ptr) {
+ return readMemInfoBlocksCommon(Ptr);
+}
+
+llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
+readMemInfoBlocksV5(const char *Ptr) {
+ return readMemInfoBlocksCommon(Ptr, /*IsHistogramEncoded=*/true);
+}
+
CallStackMap readStackInfo(const char *Ptr) {
using namespace support;
@@ -658,6 +705,8 @@ RawMemProfReader::readMemInfoBlocks(const char *Ptr) {
return readMemInfoBlocksV3(Ptr);
if (MemprofRawVersion == 4ULL)
return readMemInfoBlocksV4(Ptr);
+ if (MemprofRawVersion == 5ULL)
+ return readMemInfoBlocksV5(Ptr);
llvm_unreachable(
"Panic: Unsupported version number when reading MemInfoBlocks");
}
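
The rewritten reader deserialises each MemInfoBlock field with an explicit little-endian readNext instead of reinterpret_cast'ing the raw buffer, removing any dependence on host struct padding and byte order and making room for the V5 uint16_t histogram encoding. A hedged, self-contained sketch of the field-by-field idea; the two-field Record and the readLE helper are hypothetical and not part of the MemProf format:

    #include <cstdint>
    #include <cstring>

    struct Record { uint64_t Id; uint32_t Count; }; // hypothetical on-disk record

    // Read an unaligned little-endian integer and advance the cursor.
    template <typename T> static T readLE(const char *&P) {
      unsigned char Bytes[sizeof(T)];
      std::memcpy(Bytes, P, sizeof(T));
      P += sizeof(T);
      T V = 0;
      for (unsigned I = 0; I < sizeof(T); ++I)
        V |= static_cast<T>(Bytes[I]) << (8 * I);
      return V;
    }

    static Record readRecord(const char *&P) {
      Record R;
      R.Id = readLE<uint64_t>(P);   // field by field: no struct-layout assumptions
      R.Count = readLE<uint32_t>(P);
      return R;
    }
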
diff --git a/llvm/lib/Support/BLAKE3/CMakeLists.txt b/llvm/lib/Support/BLAKE3/CMakeLists.txt
index eae2b02..90311ae 100644
--- a/llvm/lib/Support/BLAKE3/CMakeLists.txt
+++ b/llvm/lib/Support/BLAKE3/CMakeLists.txt
@@ -26,7 +26,8 @@ endmacro()
if (CAN_USE_ASSEMBLER)
if (MSVC)
check_symbol_exists(_M_X64 "" IS_X64)
- if (IS_X64)
+ check_symbol_exists(_M_ARM64EC "" IS_ARM64EC)
+ if (IS_X64 AND NOT IS_ARM64EC)
enable_language(ASM_MASM)
set(LLVM_BLAKE3_ASM_FILES
blake3_sse2_x86-64_windows_msvc.asm
diff --git a/llvm/lib/Support/FileCollector.cpp b/llvm/lib/Support/FileCollector.cpp
index 29436f8..edb5313 100644
--- a/llvm/lib/Support/FileCollector.cpp
+++ b/llvm/lib/Support/FileCollector.cpp
@@ -313,5 +313,6 @@ private:
IntrusiveRefCntPtr<vfs::FileSystem>
FileCollector::createCollectorVFS(IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
std::shared_ptr<FileCollector> Collector) {
- return new FileCollectorFileSystem(std::move(BaseFS), std::move(Collector));
+ return makeIntrusiveRefCnt<FileCollectorFileSystem>(std::move(BaseFS),
+ std::move(Collector));
}
diff --git a/llvm/lib/Support/Unix/Path.inc b/llvm/lib/Support/Unix/Path.inc
index 277247e..cc02cae 100644
--- a/llvm/lib/Support/Unix/Path.inc
+++ b/llvm/lib/Support/Unix/Path.inc
@@ -1190,7 +1190,7 @@ Expected<size_t> readNativeFile(file_t FD, MutableArrayRef<char> Buf) {
size_t Size = Buf.size();
#endif
ssize_t NumRead = sys::RetryAfterSignal(-1, ::read, FD, Buf.data(), Size);
- if (ssize_t(NumRead) == -1)
+ if (NumRead == -1)
return errorCodeToError(errnoAsErrorCode());
// The underlying operation on these platforms allow opening directories
// for reading in more cases than other platforms.
diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp
index e489282..5d42488 100644
--- a/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/llvm/lib/Support/VirtualFileSystem.cpp
@@ -397,7 +397,8 @@ void RealFileSystem::printImpl(raw_ostream &OS, PrintType Type,
}
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
- static IntrusiveRefCntPtr<FileSystem> FS(new RealFileSystem(true));
+ static IntrusiveRefCntPtr<FileSystem> FS =
+ makeIntrusiveRefCnt<RealFileSystem>(true);
return FS;
}
@@ -2217,9 +2218,9 @@ RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
ArrayRef<std::pair<std::string, std::string>> RemappedFiles,
- bool UseExternalNames, FileSystem &ExternalFS) {
+ bool UseExternalNames, llvm::IntrusiveRefCntPtr<FileSystem> ExternalFS) {
std::unique_ptr<RedirectingFileSystem> FS(
- new RedirectingFileSystem(&ExternalFS));
+ new RedirectingFileSystem(ExternalFS));
FS->UseExternalNames = UseExternalNames;
StringMap<RedirectingFileSystem::Entry *> Entries;
@@ -2228,7 +2229,7 @@ std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
SmallString<128> From = StringRef(Mapping.first);
SmallString<128> To = StringRef(Mapping.second);
{
- auto EC = ExternalFS.makeAbsolute(From);
+ auto EC = ExternalFS->makeAbsolute(From);
(void)EC;
assert(!EC && "Could not make absolute path");
}
@@ -2250,7 +2251,7 @@ std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
}
assert(Parent && "File without a directory?");
{
- auto EC = ExternalFS.makeAbsolute(To);
+ auto EC = ExternalFS->makeAbsolute(To);
(void)EC;
assert(!EC && "Could not make absolute path");
}
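
The VirtualFileSystem and FileCollector changes replace bare new-expressions with makeIntrusiveRefCnt<T>(...), which constructs the object and its owning IntrusiveRefCntPtr in one step. A small sketch of the pattern with a hypothetical ref-counted type:

    #include "llvm/ADT/IntrusiveRefCntPtr.h"

    namespace {
    // Hypothetical ref-counted type; RefCountedBase supplies Retain/Release.
    struct Widget : llvm::RefCountedBase<Widget> {
      explicit Widget(int V) : Value(V) {}
      int Value;
    };
    } // namespace

    llvm::IntrusiveRefCntPtr<Widget> makeWidget(int V) {
      // Equivalent to IntrusiveRefCntPtr<Widget>(new Widget(V)), in one step.
      return llvm::makeIntrusiveRefCnt<Widget>(V);
    }
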
diff --git a/llvm/lib/Support/Windows/Threading.inc b/llvm/lib/Support/Windows/Threading.inc
index d862dbd..b11f216 100644
--- a/llvm/lib/Support/Windows/Threading.inc
+++ b/llvm/lib/Support/Windows/Threading.inc
@@ -106,7 +106,69 @@ void llvm::get_thread_name(SmallVectorImpl<char> &Name) {
Name.clear();
}
+namespace llvm::sys::windows {
+HMODULE loadSystemModuleSecure(LPCWSTR lpModuleName) {
+ // Ensure we really do load a module from the system32 path.
+ // As per GetModuleHandle documentation:
+ // "If lpModuleName does not include a path and there is more than one loaded
+ // module with the same base name and extension, you cannot predict which
+ // module handle will be returned.". This mitigates
+ // https://learn.microsoft.com/en-us/security-updates/securityadvisories/2010/2269637
+ SmallVector<wchar_t, MAX_PATH> Buf;
+ size_t Size = MAX_PATH;
+ do {
+ Buf.resize_for_overwrite(Size);
+ SetLastError(NO_ERROR);
+ Size = ::GetSystemDirectoryW(Buf.data(), Buf.size());
+ if (Size == 0)
+ return NULL;
+
+ // Try again with larger buffer.
+ } while (Size > Buf.size());
+
+ Buf.truncate(Size);
+ Buf.push_back(L'\\');
+ Buf.append(lpModuleName, lpModuleName + std::wcslen(lpModuleName));
+ Buf.push_back(0);
+
+ return ::GetModuleHandleW(Buf.data());
+}
+} // namespace llvm::sys::windows
+
SetThreadPriorityResult llvm::set_thread_priority(ThreadPriority Priority) {
+#ifdef THREAD_POWER_THROTTLING_CURRENT_VERSION
+ HMODULE kernelM = llvm::sys::windows::loadSystemModuleSecure(L"kernel32.dll");
+ if (kernelM) {
+ // SetThreadInformation is only available on Windows 8 and later. Since we
+ // still support compilation on Windows 7, we load the function dynamically.
+ typedef BOOL(WINAPI * SetThreadInformation_t)(
+ HANDLE hThread, THREAD_INFORMATION_CLASS ThreadInformationClass,
+ _In_reads_bytes_(ThreadInformationSize) PVOID ThreadInformation,
+ ULONG ThreadInformationSize);
+ static const auto pfnSetThreadInformation =
+ (SetThreadInformation_t)::GetProcAddress(kernelM,
+ "SetThreadInformation");
+ if (pfnSetThreadInformation) {
+ auto setThreadInformation = [](ULONG ControlMaskAndStateMask) {
+ THREAD_POWER_THROTTLING_STATE state{};
+ state.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION;
+ state.ControlMask = ControlMaskAndStateMask;
+ state.StateMask = ControlMaskAndStateMask;
+ return pfnSetThreadInformation(
+ ::GetCurrentThread(), ThreadPowerThrottling, &state, sizeof(state));
+ };
+
+ // Use EcoQoS for ThreadPriority::Background where available (running on the
+ // most efficient cores at the most efficient CPU frequency):
+ // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreadinformation
+ // https://learn.microsoft.com/en-us/windows/win32/procthread/quality-of-service
+ setThreadInformation(Priority == ThreadPriority::Background
+ ? THREAD_POWER_THROTTLING_EXECUTION_SPEED
+ : 0);
+ }
+ }
+#endif
+
// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-setthreadpriority
// Begin background processing mode. The system lowers the resource scheduling
// priorities of the thread so that it can perform background work without
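
The kernel32 handle above is resolved through a full System32 path rather than a bare module name, so a same-named DLL elsewhere on the search path cannot be picked up. A stand-alone sketch of that pattern, Windows-only and with error handling trimmed:

    #ifdef _WIN32
    #include <windows.h>
    #include <string>

    // Resolve an already-loaded system DLL by its full System32 path; a bare
    // name could match a module loaded from an attacker-controlled location.
    static HMODULE loadFromSystem32(const wchar_t *Name) {
      wchar_t Dir[MAX_PATH];
      UINT Len = ::GetSystemDirectoryW(Dir, MAX_PATH);
      if (Len == 0 || Len >= MAX_PATH)
        return nullptr;
      std::wstring Path(Dir, Len);
      Path += L'\\';
      Path += Name;
      return ::GetModuleHandleW(Path.c_str());
    }
    #endif
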
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index ca09598..99f0af5 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -39,8 +39,8 @@ let Predicates = [HasDotProd] in {
def ext_addv_to_udot_addv : GICombineRule<
(defs root:$root, ext_addv_to_udot_addv_matchinfo:$matchinfo),
(match (wip_match_opcode G_VECREDUCE_ADD):$root,
- [{ return matchExtAddvToUdotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
- (apply [{ applyExtAddvToUdotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
+ [{ return matchExtAddvToDotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
+ (apply [{ applyExtAddvToDotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
>;
}
@@ -62,8 +62,10 @@ class push_opcode_through_ext<Instruction opcode, Instruction extOpcode> : GICom
def push_sub_through_zext : push_opcode_through_ext<G_SUB, G_ZEXT>;
def push_add_through_zext : push_opcode_through_ext<G_ADD, G_ZEXT>;
+def push_mul_through_zext : push_opcode_through_ext<G_MUL, G_ZEXT>;
def push_sub_through_sext : push_opcode_through_ext<G_SUB, G_SEXT>;
def push_add_through_sext : push_opcode_through_ext<G_ADD, G_SEXT>;
+def push_mul_through_sext : push_opcode_through_ext<G_MUL, G_SEXT>;
def AArch64PreLegalizerCombiner: GICombiner<
"AArch64PreLegalizerCombinerImpl", [all_combines,
@@ -75,8 +77,10 @@ def AArch64PreLegalizerCombiner: GICombiner<
ext_uaddv_to_uaddlv,
push_sub_through_zext,
push_add_through_zext,
+ push_mul_through_zext,
push_sub_through_sext,
- push_add_through_sext]> {
+ push_add_through_sext,
+ push_mul_through_sext]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index eca7ca5..ad42f4b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5296,7 +5296,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
case Intrinsic::aarch64_sve_ld1_pn_x2: {
if (VT == MVT::nxv16i8) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 2, 0, AArch64::LD1B_2Z_IMM_PSEUDO, AArch64::LD1B_2Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5307,7 +5307,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
} else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
VT == MVT::nxv8bf16) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 2, 1, AArch64::LD1H_2Z_IMM_PSEUDO, AArch64::LD1H_2Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5317,7 +5317,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 2, 2, AArch64::LD1W_2Z_IMM_PSEUDO, AArch64::LD1W_2Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5327,7 +5327,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 2, 3, AArch64::LD1D_2Z_IMM_PSEUDO, AArch64::LD1D_2Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5341,7 +5341,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
case Intrinsic::aarch64_sve_ld1_pn_x4: {
if (VT == MVT::nxv16i8) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 4, 0, AArch64::LD1B_4Z_IMM_PSEUDO, AArch64::LD1B_4Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5352,7 +5352,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
} else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
VT == MVT::nxv8bf16) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 4, 1, AArch64::LD1H_4Z_IMM_PSEUDO, AArch64::LD1H_4Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5362,7 +5362,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 4, 2, AArch64::LD1W_4Z_IMM_PSEUDO, AArch64::LD1W_4Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5372,7 +5372,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(
Node, 4, 3, AArch64::LD1D_4Z_IMM_PSEUDO, AArch64::LD1D_4Z_PSEUDO);
else if (Subtarget->hasSVE2p1())
@@ -5386,7 +5386,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
case Intrinsic::aarch64_sve_ldnt1_pn_x2: {
if (VT == MVT::nxv16i8) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 2, 0,
AArch64::LDNT1B_2Z_IMM_PSEUDO,
AArch64::LDNT1B_2Z_PSEUDO);
@@ -5398,7 +5398,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
} else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
VT == MVT::nxv8bf16) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 2, 1,
AArch64::LDNT1H_2Z_IMM_PSEUDO,
AArch64::LDNT1H_2Z_PSEUDO);
@@ -5409,7 +5409,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 2, 2,
AArch64::LDNT1W_2Z_IMM_PSEUDO,
AArch64::LDNT1W_2Z_PSEUDO);
@@ -5420,7 +5420,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 2, 3,
AArch64::LDNT1D_2Z_IMM_PSEUDO,
AArch64::LDNT1D_2Z_PSEUDO);
@@ -5435,7 +5435,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
case Intrinsic::aarch64_sve_ldnt1_pn_x4: {
if (VT == MVT::nxv16i8) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 4, 0,
AArch64::LDNT1B_4Z_IMM_PSEUDO,
AArch64::LDNT1B_4Z_PSEUDO);
@@ -5447,7 +5447,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
} else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
VT == MVT::nxv8bf16) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 4, 1,
AArch64::LDNT1H_4Z_IMM_PSEUDO,
AArch64::LDNT1H_4Z_PSEUDO);
@@ -5458,7 +5458,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 4, 2,
AArch64::LDNT1W_4Z_IMM_PSEUDO,
AArch64::LDNT1W_4Z_PSEUDO);
@@ -5469,7 +5469,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
return;
} else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
- if (Subtarget->hasSME2())
+ if (Subtarget->hasSME2() && Subtarget->isStreaming())
SelectContiguousMultiVectorLoad(Node, 4, 3,
AArch64::LDNT1D_4Z_IMM_PSEUDO,
AArch64::LDNT1D_4Z_PSEUDO);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7c9fc67..4f6e3dd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8952,6 +8952,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID &CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
MachineFunction::CallSiteInfo CSInfo;
@@ -8991,6 +8992,10 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
*DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC);
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
// Check callee args/returns for SVE registers and set calling convention
// accordingly.
if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
@@ -11386,6 +11391,22 @@ SDValue AArch64TargetLowering::LowerSELECT_CC(
return DAG.getNode(ISD::AND, DL, VT, LHS, Shift);
}
+ // Canonicalise absolute difference patterns:
+ // select_cc lhs, rhs, sub(lhs, rhs), sub(rhs, lhs), cc ->
+ // select_cc lhs, rhs, sub(lhs, rhs), neg(sub(lhs, rhs)), cc
+ //
+ // select_cc lhs, rhs, sub(rhs, lhs), sub(lhs, rhs), cc ->
+ // select_cc lhs, rhs, neg(sub(lhs, rhs)), sub(lhs, rhs), cc
+ // The second forms can be matched into subs+cneg.
+ if (TVal.getOpcode() == ISD::SUB && FVal.getOpcode() == ISD::SUB) {
+ if (TVal.getOperand(0) == LHS && TVal.getOperand(1) == RHS &&
+ FVal.getOperand(0) == RHS && FVal.getOperand(1) == LHS)
+ FVal = DAG.getNegative(TVal, DL, TVal.getValueType());
+ else if (TVal.getOperand(0) == RHS && TVal.getOperand(1) == LHS &&
+ FVal.getOperand(0) == LHS && FVal.getOperand(1) == RHS)
+ TVal = DAG.getNegative(FVal, DL, FVal.getValueType());
+ }
+
unsigned Opcode = AArch64ISD::CSEL;
// If both the TVal and the FVal are constants, see if we can swap them in
@@ -24112,6 +24133,60 @@ static SDValue combineBoolVectorAndTruncateStore(SelectionDAG &DAG,
Store->getMemOperand());
}
+// Combine store (fp_to_int X) to use vector semantics around the conversion
+// when NEON is available. This allows us to store the in-vector result directly
+// without transferring the result into a GPR in the process.
+static SDValue combineStoreValueFPToInt(StoreSDNode *ST,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ // Limit to post-legalization in order to avoid peeling truncating stores.
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+ if (!Subtarget->isNeonAvailable())
+ return SDValue();
+ // Source operand is already a vector.
+ SDValue Value = ST->getValue();
+ if (Value.getValueType().isVector())
+ return SDValue();
+
+ // Look through potential assertions.
+ while (Value->isAssert())
+ Value = Value.getOperand(0);
+
+ if (Value.getOpcode() != ISD::FP_TO_SINT &&
+ Value.getOpcode() != ISD::FP_TO_UINT)
+ return SDValue();
+ if (!Value->hasOneUse())
+ return SDValue();
+
+ SDValue FPSrc = Value.getOperand(0);
+ EVT SrcVT = FPSrc.getValueType();
+ if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
+ return SDValue();
+
+ // No support for assignments such as i64 = fp_to_sint i32
+ EVT VT = Value.getSimpleValueType();
+ if (VT != SrcVT.changeTypeToInteger())
+ return SDValue();
+
+ // Create a 128-bit element vector to avoid widening. The floating point
+ // conversion is transformed into a single element conversion via a pattern.
+ unsigned NumElements = 128 / SrcVT.getFixedSizeInBits();
+ EVT VecSrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumElements);
+ EVT VecDstVT = VecSrcVT.changeTypeToInteger();
+ SDLoc DL(ST);
+ SDValue VecFP = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, FPSrc);
+ SDValue VecConv = DAG.getNode(Value.getOpcode(), DL, VecDstVT, VecFP);
+
+ SDValue Zero = DAG.getVectorIdxConstant(0, DL);
+ SDValue Extracted =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VecConv, Zero);
+
+ DCI.CombineTo(ST->getValue().getNode(), Extracted);
+ return SDValue(ST, 0);
+}
+
bool isHalvingTruncateOfLegalScalableType(EVT SrcVT, EVT DstVT) {
return (SrcVT == MVT::nxv8i16 && DstVT == MVT::nxv8i8) ||
(SrcVT == MVT::nxv4i32 && DstVT == MVT::nxv4i16) ||
@@ -24194,6 +24269,9 @@ static SDValue performSTORECombine(SDNode *N,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDLoc DL(ST);
+ if (SDValue Res = combineStoreValueFPToInt(ST, DCI, DAG, Subtarget))
+ return Res;
+
auto hasValidElementTypeForFPTruncStore = [](EVT VT) {
EVT EltVT = VT.getVectorElementType();
return EltVT == MVT::f32 || EltVT == MVT::f64;
@@ -26926,6 +27004,23 @@ static SDValue performSHLCombine(SDNode *N,
return DAG.getNode(ISD::AND, DL, VT, NewShift, NewRHS);
}
+static SDValue performRNDRCombine(SDNode *N, SelectionDAG &DAG) {
+ unsigned IntrinsicID = N->getConstantOperandVal(1);
+ auto Register =
+ (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
+ : AArch64SysReg::RNDRRS);
+ SDLoc DL(N);
+ SDValue A = DAG.getNode(
+ AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, FlagsVT, MVT::Other),
+ N->getOperand(0), DAG.getConstant(Register, DL, MVT::i32));
+ SDValue B = DAG.getNode(
+ AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
+ return DAG.getMergeValues(
+ {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
+}
+
SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -27241,22 +27336,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
case Intrinsic::aarch64_rndr:
- case Intrinsic::aarch64_rndrrs: {
- unsigned IntrinsicID = N->getConstantOperandVal(1);
- auto Register =
- (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
- : AArch64SysReg::RNDRRS);
- SDLoc DL(N);
- SDValue A = DAG.getNode(
- AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, FlagsVT, MVT::Other),
- N->getOperand(0), DAG.getConstant(Register, DL, MVT::i32));
- SDValue B = DAG.getNode(
- AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i32),
- DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
- return DAG.getMergeValues(
- {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
- }
+ case Intrinsic::aarch64_rndrrs:
+ return performRNDRCombine(N, DAG);
case Intrinsic::aarch64_sme_ldr_zt:
return DAG.getNode(AArch64ISD::RESTORE_ZT, SDLoc(N),
DAG.getVTList(MVT::Other), N->getOperand(0),
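
Two of the additions above are easiest to see from the source patterns they target: the SELECT_CC canonicalisation covers the classic absolute-difference idiom (now selectable as subs + cneg), and the new store combine keeps a float-to-int conversion in a SIMD register when its only use is a store. Hedged C++ examples of such inputs, illustrative rather than taken from the patch's tests:

    #include <cstdint>

    // select_cc lhs, rhs, sub(lhs, rhs), sub(rhs, lhs), cc -- canonicalised so
    // the backend can emit subs + cneg.
    uint32_t absDiff(uint32_t A, uint32_t B) {
      return A > B ? A - B : B - A;
    }

    // store (fp_to_sint f32) -- the converted value can now be stored straight
    // from the vector register instead of moving through a GPR first.
    void storeConverted(float F, int32_t *Out) {
      *Out = static_cast<int32_t>(F);
    }
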
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 07cacfa..251fd44 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6668,6 +6668,15 @@ def : Pat<(f16 (any_uint_to_fp (i32 (any_fp_to_uint f16:$Rn)))),
(UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
+def : Pat<(v4i32 (any_fp_to_sint (v4f32 (scalar_to_vector (f32 FPR32:$src))))),
+ (v4i32 (INSERT_SUBREG (IMPLICIT_DEF), (i32 (FCVTZSv1i32 (f32 FPR32:$src))), ssub))>;
+def : Pat<(v4i32 (any_fp_to_uint (v4f32 (scalar_to_vector (f32 FPR32:$src))))),
+ (v4i32 (INSERT_SUBREG (IMPLICIT_DEF), (i32 (FCVTZUv1i32 (f32 FPR32:$src))), ssub))>;
+def : Pat<(v2i64 (any_fp_to_sint (v2f64 (scalar_to_vector (f64 FPR64:$src))))),
+ (v2i64 (INSERT_SUBREG (IMPLICIT_DEF), (i64 (FCVTZSv1i64 (f64 FPR64:$src))), dsub))>;
+def : Pat<(v2i64 (any_fp_to_uint (v2f64 (scalar_to_vector (f64 FPR64:$src))))),
+ (v2i64 (INSERT_SUBREG (IMPLICIT_DEF), (i64 (FCVTZUv1i64 (f64 FPR64:$src))), dsub))>;
+
// int -> float conversion of value in lane 0 of simd vector should use
// correct cvtf variant to avoid costly fpr <-> gpr register transfers.
def : Pat<(f32 (sint_to_fp (i32 (vector_extract (v4i32 FPR128:$Rn), (i64 0))))),
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index abcd550..b97d622 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -12,7 +12,7 @@
// MOVi64imm + ANDS?Xrr ==> ANDXri + ANDS?Xri
//
// 2. MOVi32imm + ADDWrr ==> ADDWRi + ADDWRi
-// MOVi64imm + ADDXrr ==> ANDXri + ANDXri
+// MOVi64imm + ADDXrr ==> ADDXri + ADDXri
//
// 3. MOVi32imm + SUBWrr ==> SUBWRi + SUBWRi
// MOVi64imm + SUBXrr ==> SUBXri + SUBXri
@@ -125,8 +125,13 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
template <typename T>
bool visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI);
+ // Strategy used to split logical immediate bitmasks.
+ enum class SplitStrategy {
+ Intersect,
+ };
template <typename T>
- bool visitAND(unsigned Opc, MachineInstr &MI, unsigned OtherOpc = 0);
+ bool trySplitLogicalImm(unsigned Opc, MachineInstr &MI,
+ SplitStrategy Strategy, unsigned OtherOpc = 0);
bool visitORR(MachineInstr &MI);
bool visitCSEL(MachineInstr &MI);
bool visitINSERT(MachineInstr &MI);
@@ -158,14 +163,6 @@ INITIALIZE_PASS(AArch64MIPeepholeOpt, "aarch64-mi-peephole-opt",
template <typename T>
static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
T UImm = static_cast<T>(Imm);
- if (AArch64_AM::isLogicalImmediate(UImm, RegSize))
- return false;
-
- // If this immediate can be handled by one instruction, do not split it.
- SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
- AArch64_IMM::expandMOVImm(UImm, RegSize, Insn);
- if (Insn.size() == 1)
- return false;
// The bitmask immediate consists of consecutive ones. Let's say there is
// constant 0b00000000001000000000010000000000 which does not consist of
@@ -194,8 +191,9 @@ static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
}
template <typename T>
-bool AArch64MIPeepholeOpt::visitAND(unsigned Opc, MachineInstr &MI,
- unsigned OtherOpc) {
+bool AArch64MIPeepholeOpt::trySplitLogicalImm(unsigned Opc, MachineInstr &MI,
+ SplitStrategy Strategy,
+ unsigned OtherOpc) {
// Try below transformation.
//
// MOVi32imm + ANDS?Wrr ==> ANDWri + ANDS?Wri
@@ -208,9 +206,26 @@ bool AArch64MIPeepholeOpt::visitAND(unsigned Opc, MachineInstr &MI,
return splitTwoPartImm<T>(
MI,
- [Opc, OtherOpc](T Imm, unsigned RegSize, T &Imm0,
- T &Imm1) -> std::optional<OpcodePair> {
- if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
+ [Opc, Strategy, OtherOpc](T Imm, unsigned RegSize, T &Imm0,
+ T &Imm1) -> std::optional<OpcodePair> {
+ // If this immediate is already a suitable bitmask, don't split it.
+ // TODO: Should we just combine the two instructions in this case?
+ if (AArch64_AM::isLogicalImmediate(Imm, RegSize))
+ return std::nullopt;
+
+ // If this immediate can be handled by one instruction, don't split it.
+ SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
+ AArch64_IMM::expandMOVImm(Imm, RegSize, Insn);
+ if (Insn.size() == 1)
+ return std::nullopt;
+
+ bool SplitSucc = false;
+ switch (Strategy) {
+ case SplitStrategy::Intersect:
+ SplitSucc = splitBitmaskImm(Imm, RegSize, Imm0, Imm1);
+ break;
+ }
+ if (SplitSucc)
return std::make_pair(Opc, !OtherOpc ? Opc : OtherOpc);
return std::nullopt;
},
@@ -859,16 +874,20 @@ bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
Changed |= visitINSERT(MI);
break;
case AArch64::ANDWrr:
- Changed |= visitAND<uint32_t>(AArch64::ANDWri, MI);
+ Changed |= trySplitLogicalImm<uint32_t>(AArch64::ANDWri, MI,
+ SplitStrategy::Intersect);
break;
case AArch64::ANDXrr:
- Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI);
+ Changed |= trySplitLogicalImm<uint64_t>(AArch64::ANDXri, MI,
+ SplitStrategy::Intersect);
break;
case AArch64::ANDSWrr:
- Changed |= visitAND<uint32_t>(AArch64::ANDWri, MI, AArch64::ANDSWri);
+ Changed |= trySplitLogicalImm<uint32_t>(
+ AArch64::ANDWri, MI, SplitStrategy::Intersect, AArch64::ANDSWri);
break;
case AArch64::ANDSXrr:
- Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI, AArch64::ANDSXri);
+ Changed |= trySplitLogicalImm<uint64_t>(
+ AArch64::ANDXri, MI, SplitStrategy::Intersect, AArch64::ANDSXri);
break;
case AArch64::ORRWrs:
Changed |= visitORR(MI);
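
trySplitLogicalImm keeps the old behaviour under the new SplitStrategy::Intersect name: an AND with a constant that is not itself a logical immediate, and cannot be built with a single MOV, may still be the intersection of two encodable masks, so it becomes two ANDXri instructions instead of a constant materialisation plus ANDXrr. A hedged illustration of the kind of constant this targets:

    #include <cstdint>

    // 0x0000002000000400 (bits 10 and 37 set) is not a single AArch64 logical
    // immediate, but it is the AND of two encodable masks: a contiguous run of
    // ones over bits 10..37 and its "all ones except bits 11..36" counterpart.
    // The peephole can therefore split the AND into two ANDXri instructions.
    uint64_t maskTwoBits(uint64_t X) {
      return X & 0x0000002000000400ULL;
    }
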
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 40f49da..18ca22f 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -4905,14 +4905,17 @@ void AArch64TTIImpl::getUnrollingPreferences(
// Disable partial & runtime unrolling on -Os.
UP.PartialOptSizeThreshold = 0;
- // No need to unroll auto-vectorized loops
- if (findStringMetadataForLoop(L, "llvm.loop.isvectorized"))
- return;
-
// Scan the loop: don't unroll loops with calls as this could prevent
- // inlining.
+ // inlining. Don't unroll auto-vectorized loops either, though do allow
+ // unrolling of the scalar remainder.
+ bool IsVectorized = getBooleanLoopAttribute(L, "llvm.loop.isvectorized");
for (auto *BB : L->getBlocks()) {
for (auto &I : *BB) {
+ // Both auto-vectorized loops and the scalar remainder have the
+ // isvectorized attribute, so differentiate between them by the presence
+ // of vector instructions.
+ if (IsVectorized && I.getType()->isVectorTy())
+ return;
if (isa<CallBase>(I)) {
if (isa<CallInst>(I) || isa<InvokeInst>(I))
if (const Function *F = cast<CallBase>(I).getCalledFunction())
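
The unrolling change hinges on which loops carry llvm.loop.isvectorized after the loop vectoriser runs: both the vector body and its scalar remainder do, so the remainder is told apart by the absence of vector instructions and may now be unrolled. A hedged example of a loop that typically produces such a vector-body-plus-remainder pair:

    // After vectorisation this loop becomes a vector body plus a scalar
    // remainder; both carry the isvectorized metadata, but only the remainder
    // (which contains no vector instructions) is now considered for unrolling.
    void saxpy(float *Y, const float *X, float A, int N) {
      for (int I = 0; I < N; ++I)
        Y[I] = A * X[I] + Y[I];
    }
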
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index 0b79850..1a15075 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -50,8 +50,10 @@ bool AArch64GISelUtils::isCMN(const MachineInstr *MaybeSub,
//
// %sub = G_SUB 0, %y
// %cmp = G_ICMP eq/ne, %z, %sub
+ // or with signed comparisons with the no-signed-wrap flag set
if (!MaybeSub || MaybeSub->getOpcode() != TargetOpcode::G_SUB ||
- !CmpInst::isEquality(Pred))
+ (!CmpInst::isEquality(Pred) &&
+ !(CmpInst::isSigned(Pred) && MaybeSub->getFlag(MachineInstr::NoSWrap))))
return false;
auto MaybeZero =
getIConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
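
The relaxed isCMN check also accepts signed predicates when the G_SUB carries the no-signed-wrap flag, since the negation then cannot change the comparison's result across the overflow boundary. A hedged example of source that produces this shape; the negation gets nsw in IR because signed overflow would be undefined behaviour:

    // icmp sgt %x, (sub nsw 0, %y) -- with nsw on the subtraction this can be
    // selected as cmn instead of an explicit neg followed by cmp.
    bool greaterThanNegated(int X, int Y) {
      return X > -Y;
    }
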
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 1381a9b..d905692 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1810,7 +1810,7 @@ bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
// Couldn't optimize. Emit a compare + a Bcc.
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
- auto PredOp = ICmp.getOperand(1);
+ auto &PredOp = ICmp.getOperand(1);
emitIntegerCompare(ICmp.getOperand(2), ICmp.getOperand(3), PredOp, MIB);
const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
static_cast<CmpInst::Predicate>(PredOp.getPredicate()));
@@ -2506,12 +2506,12 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
return false;
}
auto &PredOp = Cmp->getOperand(1);
- auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
- const AArch64CC::CondCode InvCC =
- changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
MIB.setInstrAndDebugLoc(I);
emitIntegerCompare(/*LHS=*/Cmp->getOperand(2),
/*RHS=*/Cmp->getOperand(3), PredOp, MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
+ const AArch64CC::CondCode InvCC =
+ changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
emitCSINC(/*Dst=*/AddDst, /*Src =*/AddLHS, /*Src2=*/AddLHS, InvCC, MIB);
I.eraseFromParent();
return true;
@@ -3574,10 +3574,11 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return false;
}
- auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
+ auto &PredOp = I.getOperand(1);
+ emitIntegerCompare(I.getOperand(2), I.getOperand(3), PredOp, MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
const AArch64CC::CondCode InvCC =
changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
- emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1), MIB);
emitCSINC(/*Dst=*/I.getOperand(0).getReg(), /*Src1=*/AArch64::WZR,
/*Src2=*/AArch64::WZR, InvCC, MIB);
I.eraseFromParent();
@@ -5096,11 +5097,11 @@ bool AArch64InstructionSelector::tryOptSelect(GSelect &I) {
AArch64CC::CondCode CondCode;
if (CondOpc == TargetOpcode::G_ICMP) {
- auto Pred =
- static_cast<CmpInst::Predicate>(CondDef->getOperand(1).getPredicate());
+ auto &PredOp = CondDef->getOperand(1);
+ emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3), PredOp,
+ MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
CondCode = changeICMPPredToAArch64CC(Pred);
- emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
- CondDef->getOperand(1), MIB);
} else {
// Get the condition code for the select.
auto Pred =
@@ -5148,29 +5149,37 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
MachineInstr *LHSDef = getDefIgnoringCopies(LHS.getReg(), MRI);
MachineInstr *RHSDef = getDefIgnoringCopies(RHS.getReg(), MRI);
auto P = static_cast<CmpInst::Predicate>(Predicate.getPredicate());
+
// Given this:
//
// x = G_SUB 0, y
- // G_ICMP x, z
+ // G_ICMP z, x
//
// Produce this:
//
- // cmn y, z
- if (isCMN(LHSDef, P, MRI))
- return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder);
+ // cmn z, y
+ if (isCMN(RHSDef, P, MRI))
+ return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder);
- // Same idea here, but with the RHS of the compare instead:
+ // Same idea here, but with the LHS of the compare instead:
//
// Given this:
//
// x = G_SUB 0, y
- // G_ICMP z, x
+ // G_ICMP x, z
//
// Produce this:
//
- // cmn z, y
- if (isCMN(RHSDef, P, MRI))
- return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder);
+ // cmn y, z
+ //
+ // But be careful! We need to swap the predicate!
+ if (isCMN(LHSDef, P, MRI)) {
+ if (!CmpInst::isEquality(P)) {
+ P = CmpInst::getSwappedPredicate(P);
+ Predicate = MachineOperand::CreatePredicate(P);
+ }
+ return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder);
+ }
// Given this:
//
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index bb0f667b..e0e1af7 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1650,6 +1650,12 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MI.eraseFromParent();
return true;
};
+ auto LowerTriOp = [&MI, &MIB](unsigned Opcode) {
+ MIB.buildInstr(Opcode, {MI.getOperand(0)},
+ {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4)});
+ MI.eraseFromParent();
+ return true;
+ };
Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrinsicID) {
@@ -1828,6 +1834,10 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
return LowerBinOp(TargetOpcode::G_USUBSAT);
break;
}
+ case Intrinsic::aarch64_neon_udot:
+ return LowerTriOp(AArch64::G_UDOT);
+ case Intrinsic::aarch64_neon_sdot:
+ return LowerTriOp(AArch64::G_SDOT);
case Intrinsic::vector_reverse:
// TODO: Add support for vector_reverse
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 1cd9453..8c10673 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -228,12 +228,13 @@ void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
}
-// Combines vecreduce_add(mul(ext(x), ext(y))) -> vecreduce_add(udot(x, y))
-// Or vecreduce_add(ext(x)) -> vecreduce_add(udot(x, 1))
+// Combines vecreduce_add(mul(ext(x), ext(y))) -> vecreduce_add([us]dot(x, y))
+// Or vecreduce_add(ext(mul(ext(x), ext(y)))) -> vecreduce_add([us]dot(x, y))
+// Or vecreduce_add(ext(x)) -> vecreduce_add([us]dot(x, 1))
// Similar to performVecReduceAddCombine in SelectionDAG
-bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
- const AArch64Subtarget &STI,
- std::tuple<Register, Register, bool> &MatchInfo) {
+bool matchExtAddvToDotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
+ const AArch64Subtarget &STI,
+ std::tuple<Register, Register, bool> &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
"Expected a G_VECREDUCE_ADD instruction");
assert(STI.hasDotProd() && "Target should have Dot Product feature");
@@ -246,31 +247,57 @@ bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
if (DstTy.getScalarSizeInBits() != 32 || MidTy.getScalarSizeInBits() != 32)
return false;
- LLT SrcTy;
- auto I1Opc = I1->getOpcode();
- if (I1Opc == TargetOpcode::G_MUL) {
+ // Detect mul(ext, ext) with matching extends. If I1Opc comes in as G_ZEXT or
+ // G_SEXT, both extends must use that same opcode; on return, I1Opc is set to
+ // the extend opcode that was found.
+ auto tryMatchingMulOfExt = [&MRI](MachineInstr *MI, Register &Out1,
+ Register &Out2, unsigned &I1Opc) {
// If result of this has more than 1 use, then there is no point in creating
- // udot instruction
- if (!MRI.hasOneNonDBGUse(MidReg))
+ // a dot instruction
+ if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
return false;
MachineInstr *ExtMI1 =
- getDefIgnoringCopies(I1->getOperand(1).getReg(), MRI);
+ getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
MachineInstr *ExtMI2 =
- getDefIgnoringCopies(I1->getOperand(2).getReg(), MRI);
+ getDefIgnoringCopies(MI->getOperand(2).getReg(), MRI);
LLT Ext1DstTy = MRI.getType(ExtMI1->getOperand(0).getReg());
LLT Ext2DstTy = MRI.getType(ExtMI2->getOperand(0).getReg());
if (ExtMI1->getOpcode() != ExtMI2->getOpcode() || Ext1DstTy != Ext2DstTy)
return false;
+ if ((I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) &&
+ I1Opc != ExtMI1->getOpcode())
+ return false;
+ Out1 = ExtMI1->getOperand(1).getReg();
+ Out2 = ExtMI2->getOperand(1).getReg();
I1Opc = ExtMI1->getOpcode();
- SrcTy = MRI.getType(ExtMI1->getOperand(1).getReg());
- std::get<0>(MatchInfo) = ExtMI1->getOperand(1).getReg();
- std::get<1>(MatchInfo) = ExtMI2->getOperand(1).getReg();
+ return true;
+ };
+
+ LLT SrcTy;
+ unsigned I1Opc = I1->getOpcode();
+ if (I1Opc == TargetOpcode::G_MUL) {
+ Register Out1, Out2;
+ if (!tryMatchingMulOfExt(I1, Out1, Out2, I1Opc))
+ return false;
+ SrcTy = MRI.getType(Out1);
+ std::get<0>(MatchInfo) = Out1;
+ std::get<1>(MatchInfo) = Out2;
} else if (I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) {
- SrcTy = MRI.getType(I1->getOperand(1).getReg());
- std::get<0>(MatchInfo) = I1->getOperand(1).getReg();
- std::get<1>(MatchInfo) = 0;
+ Register I1Op = I1->getOperand(1).getReg();
+ MachineInstr *M = getDefIgnoringCopies(I1Op, MRI);
+ Register Out1, Out2;
+ if (M->getOpcode() == TargetOpcode::G_MUL &&
+ tryMatchingMulOfExt(M, Out1, Out2, I1Opc)) {
+ SrcTy = MRI.getType(Out1);
+ std::get<0>(MatchInfo) = Out1;
+ std::get<1>(MatchInfo) = Out2;
+ } else {
+ SrcTy = MRI.getType(I1Op);
+ std::get<0>(MatchInfo) = I1Op;
+ std::get<1>(MatchInfo) = 0;
+ }
} else {
return false;
}
@@ -288,11 +315,11 @@ bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
return true;
}
-void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &Builder,
- GISelChangeObserver &Observer,
- const AArch64Subtarget &STI,
- std::tuple<Register, Register, bool> &MatchInfo) {
+void applyExtAddvToDotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &Builder,
+ GISelChangeObserver &Observer,
+ const AArch64Subtarget &STI,
+ std::tuple<Register, Register, bool> &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
"Expected a G_VECREDUCE_ADD instruction");
assert(STI.hasDotProd() && "Target should have Dot Product feature");
@@ -553,15 +580,15 @@ void applyExtUaddvToUaddlv(MachineInstr &MI, MachineRegisterInfo &MRI,
MI.eraseFromParent();
}
-// Pushes ADD/SUB through extend instructions to decrease the number of extend
-// instruction at the end by allowing selection of {s|u}addl sooner
-
+// Pushes ADD/SUB/MUL through extend instructions to decrease the number of
+// extend instructions at the end by allowing selection of {s|u}addl sooner
// i32 add(i32 ext i8, i32 ext i8) => i32 ext(i16 add(i16 ext i8, i16 ext i8))
bool matchPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
Register DstReg, Register SrcReg1, Register SrcReg2) {
assert((MI.getOpcode() == TargetOpcode::G_ADD ||
- MI.getOpcode() == TargetOpcode::G_SUB) &&
- "Expected a G_ADD or G_SUB instruction\n");
+ MI.getOpcode() == TargetOpcode::G_SUB ||
+ MI.getOpcode() == TargetOpcode::G_MUL) &&
+ "Expected a G_ADD, G_SUB or G_MUL instruction\n");
// Deal with vector types only
LLT DstTy = MRI.getType(DstReg);
@@ -594,9 +621,10 @@ void applyPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
B.buildInstr(MI.getOpcode(), {MidTy}, {Ext1Reg, Ext2Reg}).getReg(0);
// G_SUB has to sign-extend the result.
- // G_ADD needs to sext from sext and can sext or zext from zext, so the
- // original opcode is used.
- if (MI.getOpcode() == TargetOpcode::G_ADD)
+ // G_ADD needs to sext from sext and can sext or zext from zext; G_MUL must
+ // keep its operands' extend kind, so the original opcode is used for both.
+ if (MI.getOpcode() == TargetOpcode::G_ADD ||
+ MI.getOpcode() == TargetOpcode::G_MUL)
B.buildInstr(Opc, {DstReg}, {AddReg});
else
B.buildSExt(DstReg, AddReg);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index a17fb93..8a0c4ac 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1389,6 +1389,9 @@ def FeatureAddSubU64Insts
: SubtargetFeature<"add-sub-u64-insts", "HasAddSubU64Insts", "true",
"Has v_add_u64 and v_sub_u64 instructions">;
+def FeatureMadU32Inst : SubtargetFeature<"mad-u32-inst", "HasMadU32Inst",
+ "true", "Has v_mad_u32 instruction">;
+
def FeatureMemToLDSLoad : SubtargetFeature<"vmem-to-lds-load-insts",
"HasVMemToLDSLoad",
"true",
@@ -2049,6 +2052,7 @@ def FeatureISAVersion12_50 : FeatureSet<
FeatureVmemPrefInsts,
FeatureLshlAddU64Inst,
FeatureAddSubU64Insts,
+ FeatureMadU32Inst,
FeatureLdsBarrierArriveAtomic,
FeatureSetPrioIncWgInst,
]>;
@@ -2429,7 +2433,7 @@ def HasAtomicFMinFMaxF64FlatInsts :
def HasLdsAtomicAddF64 :
Predicate<"Subtarget->hasLdsAtomicAddF64()">,
- AssemblerPredicate<(any_of FeatureGFX90AInsts)>;
+ AssemblerPredicate<(any_of FeatureGFX90AInsts, FeatureGFX1250Insts)>;
def HasFlatGlobalInsts : Predicate<"Subtarget->hasFlatGlobalInsts()">,
AssemblerPredicate<(all_of FeatureFlatGlobalInsts)>;
@@ -2572,6 +2576,10 @@ def HasFmaakFmamkF64Insts :
Predicate<"Subtarget->hasFmaakFmamkF64Insts()">,
AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+def HasAddMinMaxInsts :
+ Predicate<"Subtarget->hasAddMinMaxInsts()">,
+ AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+
def HasPkAddMinMaxInsts :
Predicate<"Subtarget->hasPkAddMinMaxInsts()">,
AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
@@ -2839,6 +2847,9 @@ def HasLshlAddU64Inst : Predicate<"Subtarget->hasLshlAddU64Inst()">,
def HasAddSubU64Insts : Predicate<"Subtarget->hasAddSubU64Insts()">,
AssemblerPredicate<(all_of FeatureAddSubU64Insts)>;
+def HasMadU32Inst : Predicate<"Subtarget->hasMadU32Inst()">,
+ AssemblerPredicate<(all_of FeatureMadU32Inst)>;
+
def HasLdsBarrierArriveAtomic : Predicate<"Subtarget->hasLdsBarrierArriveAtomic()">,
AssemblerPredicate<(all_of FeatureLdsBarrierArriveAtomic)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 5f19837..a9278c1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -89,10 +89,6 @@ static cl::opt<bool> DisableFDivExpand(
cl::ReallyHidden,
cl::init(false));
-static bool hasUnsafeFPMath(const Function &F) {
- return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
-}
-
class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
@@ -104,7 +100,6 @@ public:
const DominatorTree *DT;
const UniformityInfo &UA;
const DataLayout &DL;
- const bool HasUnsafeFPMath;
const bool HasFP32DenormalFlush;
bool FlowChanged = false;
mutable Function *SqrtF32 = nullptr;
@@ -117,7 +112,6 @@ public:
const DominatorTree *DT, const UniformityInfo &UA)
: F(F), ST(TM.getSubtarget<GCNSubtarget>(F)), TM(TM), TLI(TLI), AC(AC),
DT(DT), UA(UA), DL(F.getDataLayout()),
- HasUnsafeFPMath(hasUnsafeFPMath(F)),
HasFP32DenormalFlush(SIModeRegisterDefaults(F, ST).FP32Denormals ==
DenormalMode::getPreserveSign()) {}
@@ -637,8 +631,7 @@ bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
return false;
// v_rsq_f32 gives 1ulp
- return SqrtFMF.approxFunc() || HasUnsafeFPMath ||
- SqrtOp->getFPAccuracy() >= 1.0f;
+ return SqrtFMF.approxFunc() || SqrtOp->getFPAccuracy() >= 1.0f;
}
Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
@@ -664,7 +657,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
IRBuilder<>::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(DivFMF | SqrtFMF);
- if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) || HasUnsafeFPMath ||
+ if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
canIgnoreDenormalInput(Den, CtxI)) {
Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
// -1.0 / sqrt(x) -> fneg(rsq(x))
@@ -680,7 +673,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
-// allowed with unsafe-fp-math or afn.
+// allowed with afn.
//
// a/b -> a*rcp(b) when arcp is allowed, and we only need provide ULP 1.0
Value *
@@ -803,9 +796,9 @@ Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
//
// With rcp:
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
-// allowed with unsafe-fp-math or afn.
+// allowed with afn.
//
-// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
+// a/b -> a*rcp(b) when inaccurate rcp is allowed with afn.
//
// With fdiv.fast:
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
@@ -843,7 +836,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
RsqOp = SqrtOp->getOperand(0);
}
- // Inaccurate rcp is allowed with unsafe-fp-math or afn.
+ // Inaccurate rcp is allowed with afn.
//
// Defer to codegen to handle this.
//
@@ -852,7 +845,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
// expansion of afn to codegen. The current interpretation is so aggressive we
// don't need any pre-consideration here when we have better information. A
// more conservative interpretation could use handling here.
- const bool AllowInaccurateRcp = HasUnsafeFPMath || DivFMF.approxFunc();
+ const bool AllowInaccurateRcp = DivFMF.approxFunc();
if (!RsqOp && AllowInaccurateRcp)
return false;
@@ -2026,7 +2019,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
// We're trying to handle the fast-but-not-that-fast case only. The lowering
// of fast llvm.sqrt will give the raw instruction anyway.
- if (SqrtFMF.approxFunc() || HasUnsafeFPMath)
+ if (SqrtFMF.approxFunc())
return false;
const float ReqdAccuracy = FPOp->getFPAccuracy();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index c01e5d3..992572f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -143,6 +143,9 @@ def gi_global_saddr_cpol :
def gi_global_saddr_glc :
GIComplexOperandMatcher<s64, "selectGlobalSAddrGLC">,
GIComplexPatternEquiv<GlobalSAddrGLC>;
+def gi_global_saddr_no_ioffset :
+ GIComplexOperandMatcher<s64, "selectGlobalSAddrNoIOffset">,
+ GIComplexPatternEquiv<GlobalSAddrNoIOffset>;
def gi_mubuf_scratch_offset :
GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 2991778..19b8757 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -204,7 +204,7 @@ MetadataStreamerMsgPackV4::getWorkGroupDimensions(MDNode *Node) const {
for (auto &Op : Node->operands())
Dims.push_back(Dims.getDocument()->getNode(
- uint64_t(mdconst::extract<ConstantInt>(Op)->getZExtValue())));
+ mdconst::extract<ConstantInt>(Op)->getZExtValue()));
return Dims;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index dfaa145..39b4200 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1134,15 +1134,26 @@ void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
SDLoc SL(N);
bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
unsigned Opc;
+ bool UseNoCarry = Subtarget->hasMadU64U32NoCarry() && !N->hasAnyUseOfValue(1);
if (Subtarget->hasMADIntraFwdBug())
Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
: AMDGPU::V_MAD_U64_U32_gfx11_e64;
+ else if (UseNoCarry)
+ Opc = Signed ? AMDGPU::V_MAD_NC_I64_I32_e64 : AMDGPU::V_MAD_NC_U64_U32_e64;
else
Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
Clamp };
+
+ if (UseNoCarry) {
+ MachineSDNode *Mad = CurDAG->getMachineNode(Opc, SL, MVT::i64, Ops);
+ ReplaceUses(SDValue(N, 0), SDValue(Mad, 0));
+ CurDAG->RemoveDeadNode(N);
+ return;
+ }
+
CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}
@@ -2049,6 +2060,24 @@ bool AMDGPUDAGToDAGISel::SelectGlobalSAddrGLC(SDNode *N, SDValue Addr,
return true;
}
+bool AMDGPUDAGToDAGISel::SelectGlobalSAddrNoIOffset(SDNode *N, SDValue Addr,
+ SDValue &SAddr,
+ SDValue &VOffset,
+ SDValue &CPol) const {
+ bool ScaleOffset;
+ SDValue DummyOffset;
+ if (!SelectGlobalSAddr(N, Addr, SAddr, VOffset, DummyOffset, ScaleOffset,
+ false))
+ return false;
+
+ // We are assuming CPol is always the last operand of the intrinsic.
+ auto PassedCPol =
+ N->getConstantOperandVal(N->getNumOperands() - 1) & ~AMDGPU::CPol::SCAL;
+ CPol = CurDAG->getTargetConstant(
+ (ScaleOffset ? AMDGPU::CPol::SCAL : 0) | PassedCPol, SDLoc(), MVT::i32);
+ return true;
+}
+
static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
if (auto *FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
index 5636d89..983f1aa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
@@ -174,6 +174,8 @@ private:
bool SelectGlobalSAddrGLC(SDNode *N, SDValue Addr, SDValue &SAddr,
SDValue &VOffset, SDValue &Offset,
SDValue &CPol) const;
+ bool SelectGlobalSAddrNoIOffset(SDNode *N, SDValue Addr, SDValue &SAddr,
+ SDValue &VOffset, SDValue &CPol) const;
bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
SDValue &Offset) const;
bool checkFlatScratchSVSSwizzleBug(SDValue VAddr, SDValue SAddr,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 6118933..31c4f62 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2634,7 +2634,7 @@ bool AMDGPUTargetLowering::allowApproxFunc(const SelectionDAG &DAG,
if (Flags.hasApproximateFuncs())
return true;
auto &Options = DAG.getTarget().Options;
- return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+ return Options.ApproxFuncFPMath;
}
bool AMDGPUTargetLowering::needsDenormHandlingF32(const SelectionDAG &DAG,
@@ -2757,7 +2757,7 @@ SDValue AMDGPUTargetLowering::LowerFLOGCommon(SDValue Op,
const auto &Options = getTargetMachine().Options;
if (VT == MVT::f16 || Flags.hasApproximateFuncs() ||
- Options.ApproxFuncFPMath || Options.UnsafeFPMath) {
+ Options.ApproxFuncFPMath) {
if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
// Log and multiply in f32 is good enough for f16.
@@ -3585,7 +3585,7 @@ SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) con
if (N0.getValueType() == MVT::f32)
return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
- if (getTargetMachine().Options.UnsafeFPMath) {
+ if (Op->getFlags().hasApproximateFuncs()) {
// There is a generic expand for FP_TO_FP16 with unsafe fast math.
return SDValue();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 266dee1..b0d3b12 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -574,13 +574,22 @@ bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
MachineBasicBlock *BB = I.getParent();
MachineFunction *MF = BB->getParent();
const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
+ bool UseNoCarry = Subtarget->hasMadU64U32NoCarry() &&
+ MRI->use_nodbg_empty(I.getOperand(1).getReg());
unsigned Opc;
if (Subtarget->hasMADIntraFwdBug())
Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
: AMDGPU::V_MAD_I64_I32_gfx11_e64;
+ else if (UseNoCarry)
+ Opc = IsUnsigned ? AMDGPU::V_MAD_NC_U64_U32_e64
+ : AMDGPU::V_MAD_NC_I64_I32_e64;
else
Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
+
+ if (UseNoCarry)
+ I.removeOperand(1);
+
I.setDesc(TII.get(Opc));
I.addOperand(*MF, MachineOperand::CreateImm(0));
I.addImplicitDefUseOperands(*MF);
@@ -3995,6 +4004,9 @@ bool AMDGPUInstructionSelector::selectBITOP3(MachineInstr &MI) const {
}
unsigned Opc = IsB32 ? AMDGPU::V_BITOP3_B32_e64 : AMDGPU::V_BITOP3_B16_e64;
+ if (!IsB32 && STI.hasTrue16BitInsts())
+ Opc = STI.useRealTrue16Insts() ? AMDGPU::V_BITOP3_B16_gfx1250_t16_e64
+ : AMDGPU::V_BITOP3_B16_gfx1250_fake16_e64;
unsigned CBL = STI.getConstantBusLimit(Opc);
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
@@ -5789,6 +5801,17 @@ AMDGPUInstructionSelector::selectGlobalSAddrGLC(MachineOperand &Root) const {
}
InstructionSelector::ComplexRendererFns
+AMDGPUInstructionSelector::selectGlobalSAddrNoIOffset(
+ MachineOperand &Root) const {
+ const MachineInstr &I = *Root.getParent();
+
+ // We are assuming CPol is always the last operand of the intrinsic.
+ auto PassedCPol =
+ I.getOperand(I.getNumOperands() - 1).getImm() & ~AMDGPU::CPol::SCAL;
+ return selectGlobalSAddr(Root, PassedCPol, false);
+}
+
+InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
Register Addr = Root.getReg();
Register PtrBase;
@@ -6971,13 +6994,13 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_0_0(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
+ (MI.getOperand(OpIdx).getImm() & 0x1) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
}
void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_0_1(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x1)
? (int64_t)(SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL)
: (int64_t)SISrcMods::DST_OP_SEL);
}
@@ -6986,13 +7009,13 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_1_0(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x1) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
+ (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
}
void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_1_1(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x1)
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
? (int64_t)(SISrcMods::OP_SEL_0)
: 0);
}
@@ -7021,8 +7044,9 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_2_0(
void AMDGPUInstructionSelector::renderDstSelToOpSel3XFormXForm(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::DST_OP_SEL : 0);
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
+ ? (int64_t)SISrcMods::DST_OP_SEL
+ : 0);
}
void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index fe9743d0a..140e753 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -264,6 +264,8 @@ private:
selectGlobalSAddrCPol(MachineOperand &Root) const;
InstructionSelector::ComplexRendererFns
selectGlobalSAddrGLC(MachineOperand &Root) const;
+ InstructionSelector::ComplexRendererFns
+ selectGlobalSAddrNoIOffset(MachineOperand &Root) const;
InstructionSelector::ComplexRendererFns
selectScratchSAddr(MachineOperand &Root) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 7a50923..511fc69 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -94,7 +94,6 @@ def NoFP32Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode()
def NoFP64Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().FP64FP16Denormals == DenormalMode::getPreserveSign()">;
def IEEEModeEnabled : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
def IEEEModeDisabled : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
-def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
}
def FMA : Predicate<"Subtarget->hasFMA()">;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index fedfa3f..1fdf272 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1342,13 +1342,30 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
if (ST.hasVOP3PInsts()) {
- getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS})
- .legalFor({S32, S16, V2S16})
- .clampMaxNumElements(0, S16, 2)
- .minScalar(0, S16)
- .widenScalarToNextPow2(0)
- .scalarize(0)
- .lower();
+ getActionDefinitionsBuilder(G_ABS)
+ .legalFor({S32, S16, V2S16})
+ .clampMaxNumElements(0, S16, 2)
+ .minScalar(0, S16)
+ .widenScalarToNextPow2(0)
+ .scalarize(0)
+ .lower();
+ if (ST.hasIntMinMax64()) {
+ getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
+ .legalFor({S32, S16, S64, V2S16})
+ .clampMaxNumElements(0, S16, 2)
+ .minScalar(0, S16)
+ .widenScalarToNextPow2(0)
+ .scalarize(0)
+ .lower();
+ } else {
+ getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
+ .legalFor({S32, S16, V2S16})
+ .clampMaxNumElements(0, S16, 2)
+ .minScalar(0, S16)
+ .widenScalarToNextPow2(0)
+ .scalarize(0)
+ .lower();
+ }
} else {
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS})
.legalFor({S32, S16})
@@ -1682,7 +1699,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
if (ST.hasFlatAtomicFaddF32Inst())
Atomic.legalFor({{S32, FlatPtr}});
- if (ST.hasGFX90AInsts()) {
+ if (ST.hasGFX90AInsts() || ST.hasGFX1250Insts()) {
// These are legal with some caveats, and should have undergone expansion in
// the IR in most situations
// TODO: Move atomic expansion into legalizer
@@ -2295,8 +2312,8 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
LLT::scalar(32), commonAlignment(Align(64), Offset));
// Pointer address
- B.buildPtrAdd(LoadAddr, KernargPtrReg,
- B.buildConstant(LLT::scalar(64), Offset).getReg(0));
+ B.buildObjectPtrOffset(LoadAddr, KernargPtrReg,
+ B.buildConstant(LLT::scalar(64), Offset).getReg(0));
// Load address
return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
}
@@ -2317,8 +2334,9 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
MachineMemOperand::MOInvariant,
LLT::scalar(32), commonAlignment(Align(64), StructOffset));
- B.buildPtrAdd(LoadAddr, QueuePtr,
- B.buildConstant(LLT::scalar(64), StructOffset).getReg(0));
+ B.buildObjectPtrOffset(
+ LoadAddr, QueuePtr,
+ B.buildConstant(LLT::scalar(64), StructOffset).getReg(0));
return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
}
@@ -3326,7 +3344,7 @@ static bool allowApproxFunc(const MachineFunction &MF, unsigned Flags) {
if (Flags & MachineInstr::FmAfn)
return true;
const auto &Options = MF.getTarget().Options;
- return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+ return Options.ApproxFuncFPMath;
}
static bool needsDenormHandlingF32(const MachineFunction &MF, Register Src,
@@ -3432,7 +3450,7 @@ bool AMDGPULegalizerInfo::legalizeFlogCommon(MachineInstr &MI,
static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
if (Ty == F16 || MI.getFlag(MachineInstr::FmAfn) ||
- TM.Options.ApproxFuncFPMath || TM.Options.UnsafeFPMath) {
+ TM.Options.ApproxFuncFPMath) {
if (Ty == F16 && !ST.has16BitInsts()) {
Register LogVal = MRI.createGenericVirtualRegister(F32);
auto PromoteSrc = B.buildFPExt(F32, X);
@@ -4500,8 +4518,7 @@ Register AMDGPULegalizerInfo::getKernargParameterPtr(MachineIRBuilder &B,
llvm_unreachable("failed to find kernarg segment ptr");
auto COffset = B.buildConstant(LLT::scalar(64), Offset);
- // TODO: Should get nuw
- return B.buildPtrAdd(PtrTy, KernArgReg, COffset).getReg(0);
+ return B.buildObjectPtrOffset(PtrTy, KernArgReg, COffset).getReg(0);
}
/// Legalize a value that's loaded from kernel arguments. This is only used by
@@ -4860,9 +4877,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
uint16_t Flags = MI.getFlags();
LLT ResTy = MRI.getType(Res);
- const MachineFunction &MF = B.getMF();
- bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn) ||
- MF.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
if (const auto *CLHS = getConstantFPVRegVal(LHS, MRI)) {
if (!AllowInaccurateRcp && ResTy != LLT::scalar(16))
@@ -4922,9 +4937,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV64(MachineInstr &MI,
uint16_t Flags = MI.getFlags();
LLT ResTy = MRI.getType(Res);
- const MachineFunction &MF = B.getMF();
- bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
- MI.getFlag(MachineInstr::FmAfn);
+ bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
if (!AllowInaccurateRcp)
return false;
@@ -5676,8 +5689,8 @@ bool AMDGPULegalizerInfo::getImplicitArgPtr(Register DstReg,
AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR))
return false;
- // FIXME: This should be nuw
- B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
+ B.buildObjectPtrOffset(DstReg, KernargPtrReg,
+ B.buildConstant(IdxTy, Offset).getReg(0));
return true;
}
@@ -7019,8 +7032,8 @@ bool AMDGPULegalizerInfo::legalizeTrapHsaQueuePtr(
// Pointer address
Register LoadAddr = MRI.createGenericVirtualRegister(
LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
- B.buildPtrAdd(LoadAddr, KernargPtrReg,
- B.buildConstant(LLT::scalar(64), Offset).getReg(0));
+ B.buildObjectPtrOffset(LoadAddr, KernargPtrReg,
+ B.buildConstant(LLT::scalar(64), Offset).getReg(0));
// Load address
Register Temp = B.buildLoad(S64, LoadAddr, *MMO).getReg(0);
B.buildCopy(SGPR01, Temp);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 8767208..aa75534 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -53,8 +53,6 @@ private:
using FuncInfo = llvm::AMDGPULibFunc;
- bool UnsafeFPMath = false;
-
// -fuse-native.
bool AllNative = false;
@@ -117,7 +115,6 @@ private:
bool AllowStrictFP = false);
protected:
- bool isUnsafeMath(const FPMathOperator *FPOp) const;
bool isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const;
bool canIncreasePrecisionOfConstantFold(const FPMathOperator *FPOp) const;
@@ -415,23 +412,17 @@ bool AMDGPULibCalls::parseFunctionName(const StringRef &FMangledName,
return AMDGPULibFunc::parse(FMangledName, FInfo);
}
-bool AMDGPULibCalls::isUnsafeMath(const FPMathOperator *FPOp) const {
- return UnsafeFPMath || FPOp->isFast();
-}
-
bool AMDGPULibCalls::isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const {
- return UnsafeFPMath ||
- (FPOp->hasApproxFunc() && FPOp->hasNoNaNs() && FPOp->hasNoInfs());
+ return FPOp->hasApproxFunc() && FPOp->hasNoNaNs() && FPOp->hasNoInfs();
}
bool AMDGPULibCalls::canIncreasePrecisionOfConstantFold(
const FPMathOperator *FPOp) const {
// TODO: Refine to approxFunc or contract
- return isUnsafeMath(FPOp);
+ return FPOp->isFast();
}
void AMDGPULibCalls::initFunction(Function &F, FunctionAnalysisManager &FAM) {
- UnsafeFPMath = F.getFnAttribute("unsafe-fp-math").getValueAsBool();
AC = &FAM.getResult<AssumptionAnalysis>(F);
TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index f471881..b45627d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -294,7 +294,8 @@ void RegBankLegalizeHelper::splitLoad(MachineInstr &MI,
BasePlusOffset = Base;
} else {
auto Offset = B.buildConstant({PtrRB, OffsetTy}, ByteOffset);
- BasePlusOffset = B.buildPtrAdd({PtrRB, PtrTy}, Base, Offset).getReg(0);
+ BasePlusOffset =
+ B.buildObjectPtrOffset({PtrRB, PtrTy}, Base, Offset).getReg(0);
}
auto *OffsetMMO = MF.getMachineMemOperand(&BaseMMO, ByteOffset, PartTy);
auto LoadPart = B.buildLoad({DstRB, PartTy}, BasePlusOffset, *OffsetMMO);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index c5a1d9e..c8e45d4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4009,10 +4009,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_SADDE:
case AMDGPU::G_USUBE:
case AMDGPU::G_SSUBE:
- case AMDGPU::G_SMIN:
- case AMDGPU::G_SMAX:
- case AMDGPU::G_UMIN:
- case AMDGPU::G_UMAX:
case AMDGPU::G_ABS:
case AMDGPU::G_SHUFFLE_VECTOR:
case AMDGPU::G_SBFX:
@@ -4022,6 +4018,18 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
if (isSALUMapping(MI))
return getDefaultMappingSOP(MI);
return getDefaultMappingVOP(MI);
+ case AMDGPU::G_SMIN:
+ case AMDGPU::G_SMAX:
+ case AMDGPU::G_UMIN:
+ case AMDGPU::G_UMAX:
+ if (isSALUMapping(MI)) {
+ // There are no scalar 64-bit min and max instructions; use the vector instructions instead.
+ if (MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 64 &&
+ Subtarget.hasIntMinMax64())
+ return getDefaultMappingVOP(MI);
+ return getDefaultMappingSOP(MI);
+ }
+ return getDefaultMappingVOP(MI);
case AMDGPU::G_FADD:
case AMDGPU::G_FSUB:
case AMDGPU::G_FMUL:
@@ -4566,8 +4574,23 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16:
+ case Intrinsic::amdgcn_cvt_sr_pk_f16_f32:
+ case Intrinsic::amdgcn_cvt_sr_pk_bf16_f32:
case Intrinsic::amdgcn_cvt_pk_f16_fp8:
case Intrinsic::amdgcn_cvt_pk_f16_bf8:
+ case Intrinsic::amdgcn_cvt_pk_fp8_f16:
+ case Intrinsic::amdgcn_cvt_pk_bf8_f16:
+ case Intrinsic::amdgcn_cvt_sr_fp8_f16:
+ case Intrinsic::amdgcn_cvt_sr_bf8_f16:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_fp4:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_fp4:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_fp4:
case Intrinsic::amdgcn_sat_pk4_i4_i8:
case Intrinsic::amdgcn_sat_pk4_u4_u8:
case Intrinsic::amdgcn_fmed3:
@@ -4619,8 +4642,10 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_pk_f32_fp8:
case Intrinsic::amdgcn_cvt_pk_f32_bf8:
case Intrinsic::amdgcn_cvt_pk_fp8_f32:
+ case Intrinsic::amdgcn_cvt_pk_fp8_f32_e5m3:
case Intrinsic::amdgcn_cvt_pk_bf8_f32:
case Intrinsic::amdgcn_cvt_sr_fp8_f32:
+ case Intrinsic::amdgcn_cvt_sr_fp8_f32_e5m3:
case Intrinsic::amdgcn_cvt_sr_bf8_f32:
case Intrinsic::amdgcn_cvt_sr_bf16_f32:
case Intrinsic::amdgcn_cvt_sr_f16_f32:
@@ -5364,6 +5389,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = AMDGPU::getValueMapping(Bank, 32);
break;
}
+ case Intrinsic::amdgcn_global_store_async_from_lds_b8:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b32:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b64:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b128:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b8:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b32:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b64:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b128:
case Intrinsic::amdgcn_load_to_lds:
case Intrinsic::amdgcn_global_load_lds: {
OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 38f9ee5..c1f1703 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -104,7 +104,9 @@
#include "llvm/Transforms/Scalar/FlattenCFG.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
+#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h"
#include "llvm/Transforms/Scalar/Sink.h"
@@ -2066,7 +2068,12 @@ void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
// TODO: May want to move later or split into an early and late one.
addPass(AMDGPUCodeGenPreparePass(TM));
- // TODO: LICM
+ // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
+ // have expanded.
+ if (TM.getOptLevel() > CodeGenOptLevel::Less) {
+ addPass(createFunctionToLoopPassAdaptor(LICMPass(LICMOptions()),
+ /*UseMemorySSA=*/true));
+ }
}
Base::addIRPasses(addPass);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 24f4df2..a0c99b0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -597,7 +597,6 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
// Estimate all types may be fused with contract/unsafe flags
const TargetOptions &Options = TLI->getTargetMachine().Options;
if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
- Options.UnsafeFPMath ||
(FAdd->hasAllowContract() && CxtI->hasAllowContract()))
return TargetTransformInfo::TCC_Free;
}
@@ -650,8 +649,7 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
return LT.first * Cost * NElts;
}
- if (SLT == MVT::f32 && ((CxtI && CxtI->hasApproxFunc()) ||
- TLI->getTargetMachine().Options.UnsafeFPMath)) {
+ if (SLT == MVT::f32 && (CxtI && CxtI->hasApproxFunc())) {
// Fast unsafe fdiv lowering:
// f32 rcp
// f32 fmul
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 44e65b3..a83caa0 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -180,6 +180,7 @@ public:
ImmTyMatrixBFMT,
ImmTyMatrixAReuse,
ImmTyMatrixBReuse,
+ ImmTyScaleSel,
ImmTyByteSel,
};
@@ -689,6 +690,8 @@ public:
bool isVSrc_v2f16() const { return isVSrc_f16() || isLiteralImm(MVT::v2f16); }
+ bool isVSrc_NoInline_v2f16() const { return isVSrc_v2f16(); }
+
bool isVISrcB32() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
}
@@ -1182,6 +1185,7 @@ public:
case ImmTyMatrixBFMT: OS << "ImmTyMatrixBFMT"; break;
case ImmTyMatrixAReuse: OS << "ImmTyMatrixAReuse"; break;
case ImmTyMatrixBReuse: OS << "ImmTyMatrixBReuse"; break;
+ case ImmTyScaleSel: OS << "ScaleSel" ; break;
case ImmTyByteSel: OS << "ByteSel" ; break;
}
// clang-format on
@@ -2036,6 +2040,7 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_KIMM16:
return &APFloat::IEEEhalf();
case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2405,6 +2410,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_KIMM32:
@@ -2456,6 +2462,9 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
setImmKindConst();
return;
}
+ [[fallthrough]];
+
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
setImmKindLiteral();
@@ -3761,6 +3770,9 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16)
return AMDGPU::isInlinableLiteralBF16(Val, hasInv2PiInlineImm());
+ if (OperandType == AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16)
+ return false;
+
llvm_unreachable("invalid operand type");
}
default:
@@ -9356,6 +9368,14 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
}
}
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::scale_sel))
+ addOptionalImmOperand(Inst, Operands, OptionalIdx,
+ AMDGPUOperand::ImmTyScaleSel);
+
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp))
+ addOptionalImmOperand(Inst, Operands, OptionalIdx,
+ AMDGPUOperand::ImmTyClamp);
+
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel)) {
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
Inst.addOperand(Inst.getOperand(0));
@@ -9363,10 +9383,6 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
AMDGPUOperand::ImmTyByteSel);
}
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp))
- addOptionalImmOperand(Inst, Operands, OptionalIdx,
- AMDGPUOperand::ImmTyClamp);
-
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod))
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyOModSI);
@@ -9420,8 +9436,22 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
Opc == AMDGPU::V_CVT_PK_FP8_F32_fake16_e64_dpp8_gfx12 ||
Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx12_e64_dpp_gfx12 ||
Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx12_e64_dpp8_gfx12 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx1250_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx1250_e64_dpp8_gfx1250 ||
Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp_gfx12 ||
- Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp8_gfx12)) {
+ Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp8_gfx12 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_gfx1250)) {
Inst.addOperand(Inst.getOperand(0));
}
@@ -10016,9 +10046,12 @@ void AMDGPUAsmParser::cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands,
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyClamp);
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel))
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel)) {
+ if (VdstInIdx == static_cast<int>(Inst.getNumOperands()))
+ Inst.addOperand(Inst.getOperand(0));
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyByteSel);
+ }
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod))
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index f99e716..1956a15 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -2489,7 +2489,7 @@ multiclass VBUFFER_MTBUF_Real_gfx12<bits<4> op, string real_name> {
}
//===----------------------------------------------------------------------===//
-// MUBUF - GFX11, GFX12.
+// MUBUF - GFX11, GFX12, GFX1250.
//===----------------------------------------------------------------------===//
// gfx11 instruction that accept both old and new assembler name.
@@ -2600,6 +2600,12 @@ multiclass MUBUF_Real_Atomic_gfx11_gfx12<bits<8> op,
def : Mnem_gfx12<gfx11_name, gfx12_name>;
}
+multiclass MUBUF_Real_Atomic_gfx12_Renamed<bits<8> op, string real_name> :
+ MUBUF_Real_Atomic_gfx12_impl<op, 0, real_name>,
+ MUBUF_Real_Atomic_gfx12_impl<op, 1, real_name> {
+ def : Mnem_gfx12<get_BUF_ps<NAME>.Mnemonic, real_name>;
+}
+
defm BUFFER_GL0_INV : MUBUF_Real_gfx11<0x02B>;
defm BUFFER_GL1_INV : MUBUF_Real_gfx11<0x02C>;
@@ -2678,6 +2684,10 @@ defm BUFFER_ATOMIC_XOR_X2 : MUBUF_Real_Atomic_gfx11_gfx12<0x04B, "buffer
defm BUFFER_ATOMIC_PK_ADD_F16 : MUBUF_Real_Atomic_gfx12<0x059>;
defm BUFFER_ATOMIC_PK_ADD_BF16 : MUBUF_Real_Atomic_gfx12<0x05a>;
+defm BUFFER_ATOMIC_ADD_F64 : MUBUF_Real_Atomic_gfx12<0x055>;
+defm BUFFER_ATOMIC_MIN_F64 : MUBUF_Real_Atomic_gfx12_Renamed<0x05b, "buffer_atomic_min_num_f64">;
+defm BUFFER_ATOMIC_MAX_F64 : MUBUF_Real_Atomic_gfx12_Renamed<0x05c, "buffer_atomic_max_num_f64">;
+
//===----------------------------------------------------------------------===//
// MUBUF - GFX10.
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 42edec0..c466f9c 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -199,6 +199,7 @@ add_llvm_target(AMDGPUCodeGen
Instrumentation
MC
MIRParser
+ ObjCARC
Passes
Scalar
SelectionDAG
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 319cc9d..3ff675d 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -1397,6 +1397,9 @@ defm DS_BVH_STACK_RTN_B32 : DS_Real_gfx12<0x0e0,
defm DS_BVH_STACK_PUSH8_POP1_RTN_B32 : DS_Real_gfx12<0x0e1>;
defm DS_BVH_STACK_PUSH8_POP2_RTN_B64 : DS_Real_gfx12<0x0e2>;
+defm DS_ADD_F64 : DS_Real_gfx12<0x054>;
+defm DS_ADD_RTN_F64 : DS_Real_gfx12<0x074>;
+
let AssemblerPredicate = HasLdsBarrierArriveAtomic in {
defm DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 : DS_Real_gfx12<0x056>;
defm DS_ATOMIC_BARRIER_ARRIVE_RTN_B64 : DS_Real_gfx12<0x075>;
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 0f172e0d..d5d1074 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -11,6 +11,7 @@ let WantsRoot = true in {
def GlobalOffset : ComplexPattern<iPTR, 2, "SelectGlobalOffset", [], [], -10>;
def ScratchOffset : ComplexPattern<iPTR, 2, "SelectScratchOffset", [], [], -10>;
+ def GlobalSAddrNoIOffset : ComplexPattern<iPTR, 3, "SelectGlobalSAddrNoIOffset", [], [], -3>;
def GlobalSAddr : ComplexPattern<iPTR, 4, "SelectGlobalSAddr", [], [], -10>;
def GlobalSAddrGLC : ComplexPattern<iPTR, 4, "SelectGlobalSAddrGLC", [], [], -10>;
def GlobalSAddrCPol : ComplexPattern<iPTR, 4, "SelectGlobalSAddrCPol", [], [], -10>;
@@ -1361,6 +1362,26 @@ class FlatLoadSaddrPat_D16_t16 <FLAT_Pseudo inst, SDPatternOperator node, ValueT
(inst $saddr, $voffset, $offset, $cpol)
>;
+class FlatLoadLDSSignedPat <FLAT_Pseudo inst, SDPatternOperator node> : GCNPat <
+ (node (i64 VReg_64:$vaddr), (i32 VGPR_32:$dsaddr), (i32 timm:$offset), (i32 timm:$cpol)),
+ (inst $dsaddr, $vaddr, $offset, $cpol)
+>;
+
+class GlobalLoadLDSSaddrPat <FLAT_Pseudo inst, SDPatternOperator node> : GCNPat <
+ (node (GlobalSAddrNoIOffset (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), CPol:$cpol), (i32 VGPR_32:$dsaddr), (i32 timm:$offset), (i32 timm)),
+ (inst $dsaddr, $saddr, $voffset, $offset, $cpol)
+>;
+
+class FlatStoreLDSSignedPat <FLAT_Pseudo inst, SDPatternOperator node> : GCNPat <
+ (node (i64 VReg_64:$vaddr), (i32 VGPR_32:$dsaddr), (i32 timm:$offset), (i32 timm:$cpol)),
+ (inst $vaddr, $dsaddr, $offset, $cpol)
+>;
+
+class GlobalStoreLDSSaddrPat <FLAT_Pseudo inst, SDPatternOperator node> : GCNPat <
+ (node (GlobalSAddrNoIOffset (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), CPol:$cpol), (i32 VGPR_32:$dsaddr), (i32 timm:$offset), (i32 timm)),
+ (inst $saddr, $voffset, $dsaddr, $offset, $cpol)
+>;
+
class GlobalLoadSaddrPat_D16_t16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
(vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol))),
(inst $saddr, $voffset, $offset, $cpol)
@@ -1571,6 +1592,26 @@ class ScratchLoadSVaddrPat_D16_t16 <FLAT_Pseudo inst, SDPatternOperator node, Va
(inst $vaddr, $saddr, $offset, $cpol)
>;
+multiclass GlobalLoadLDSPats<FLAT_Pseudo inst, SDPatternOperator node> {
+ def : FlatLoadLDSSignedPat <inst, node> {
+ let AddedComplexity = 10;
+ }
+
+ def : GlobalLoadLDSSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR"), node> {
+ let AddedComplexity = 11;
+ }
+}
+
+multiclass GlobalStoreLDSPats<FLAT_Pseudo inst, SDPatternOperator node> {
+ def : FlatStoreLDSSignedPat <inst, node> {
+ let AddedComplexity = 10;
+ }
+
+ def : GlobalStoreLDSSaddrPat<!cast<FLAT_Pseudo>(!cast<string>(inst)#"_SADDR"), node> {
+ let AddedComplexity = 11;
+ }
+}
+
multiclass GlobalFLATLoadPats<FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> {
def : FlatLoadSignedPat <inst, node, vt> {
let AddedComplexity = 10;
@@ -2137,6 +2178,18 @@ let OtherPredicates = [isGFX125xOnly] in {
defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B128, int_amdgcn_global_load_monitor_b128, v4i32>;
} // End SubtargetPredicate = isGFX125xOnly
+let OtherPredicates = [isGFX1250Plus] in {
+ defm : GlobalLoadLDSPats <GLOBAL_LOAD_ASYNC_TO_LDS_B8, int_amdgcn_global_load_async_to_lds_b8>;
+ defm : GlobalLoadLDSPats <GLOBAL_LOAD_ASYNC_TO_LDS_B32, int_amdgcn_global_load_async_to_lds_b32>;
+ defm : GlobalLoadLDSPats <GLOBAL_LOAD_ASYNC_TO_LDS_B64, int_amdgcn_global_load_async_to_lds_b64>;
+ defm : GlobalLoadLDSPats <GLOBAL_LOAD_ASYNC_TO_LDS_B128, int_amdgcn_global_load_async_to_lds_b128>;
+
+ defm : GlobalStoreLDSPats <GLOBAL_STORE_ASYNC_FROM_LDS_B8, int_amdgcn_global_store_async_from_lds_b8>;
+ defm : GlobalStoreLDSPats <GLOBAL_STORE_ASYNC_FROM_LDS_B32, int_amdgcn_global_store_async_from_lds_b32>;
+ defm : GlobalStoreLDSPats <GLOBAL_STORE_ASYNC_FROM_LDS_B64, int_amdgcn_global_store_async_from_lds_b64>;
+ defm : GlobalStoreLDSPats <GLOBAL_STORE_ASYNC_FROM_LDS_B128, int_amdgcn_global_store_async_from_lds_b128>;
+}
+
let SubtargetPredicate = HasAtomicFMinFMaxF32GlobalInsts, OtherPredicates = [HasFlatGlobalInsts] in {
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_FMIN", "atomic_load_fmin_global", f32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_FMAX", "atomic_load_fmax_global", f32>;
@@ -3435,6 +3488,14 @@ defm GLOBAL_LOAD_TR_B64_w32 : VFLAT_Real_AllAddr_gfx1250<0x058, "globa
defm GLOBAL_LOAD_TR4_B64 : VFLAT_Real_AllAddr_gfx1250<0x073>;
defm GLOBAL_LOAD_TR6_B96 : VFLAT_Real_AllAddr_gfx1250<0x074>;
+defm FLAT_ATOMIC_ADD_F64 : VFLAT_Real_Atomics_gfx1250<0x055>;
+defm FLAT_ATOMIC_MIN_F64 : VFLAT_Real_Atomics_gfx1250<0x05b, "flat_atomic_min_num_f64">;
+defm FLAT_ATOMIC_MAX_F64 : VFLAT_Real_Atomics_gfx1250<0x05c, "flat_atomic_max_num_f64">;
+
+defm GLOBAL_ATOMIC_ADD_F64 : VFLAT_Real_Atomics_gfx1250<0x055>;
+defm GLOBAL_ATOMIC_MIN_F64 : VFLAT_Real_Atomics_gfx1250<0x05b, "global_atomic_min_num_f64">;
+defm GLOBAL_ATOMIC_MAX_F64 : VFLAT_Real_Atomics_gfx1250<0x05c, "global_atomic_max_num_f64">;
+
def True16D16Table : GenericTable {
let FilterClass = "True16D16Table";
let CppTypeName = "True16D16Info";
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index ce1ce68..33b66a6 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -896,15 +896,10 @@ GCNScheduleDAGMILive::getRegionLiveInMap() const {
assert(!Regions.empty());
std::vector<MachineInstr *> RegionFirstMIs;
RegionFirstMIs.reserve(Regions.size());
- auto I = Regions.rbegin(), E = Regions.rend();
- do {
- const MachineBasicBlock *MBB = I->first->getParent();
- auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
- RegionFirstMIs.push_back(MI);
- do {
- ++I;
- } while (I != E && I->first->getParent() == MBB);
- } while (I != E);
+ for (auto &[RegionBegin, RegionEnd] : reverse(Regions))
+ RegionFirstMIs.push_back(
+ &*skipDebugInstructionsForward(RegionBegin, RegionEnd));
+
return getLiveRegMap(RegionFirstMIs, /*After=*/false, *LIS);
}
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 10ded0e..bdd900d 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -273,6 +273,7 @@ protected:
bool HasMinimum3Maximum3PKF16 = false;
bool HasLshlAddU64Inst = false;
bool HasAddSubU64Insts = false;
+ bool HasMadU32Inst = false;
bool HasPointSampleAccel = false;
bool HasLdsBarrierArriveAtomic = false;
bool HasSetPrioIncWgInst = false;
@@ -715,7 +716,9 @@ public:
bool hasVINTERPEncoding() const { return GFX11Insts && !hasGFX1250Insts(); }
// DS_ADD_F64/DS_ADD_RTN_F64
- bool hasLdsAtomicAddF64() const { return hasGFX90AInsts(); }
+ bool hasLdsAtomicAddF64() const {
+ return hasGFX90AInsts() || hasGFX1250Insts();
+ }
bool hasMultiDwordFlatScratchAddressing() const {
return getGeneration() >= GFX9;
@@ -1519,9 +1522,22 @@ public:
// \returns true if the target has V_ADD_U64/V_SUB_U64 instructions.
bool hasAddSubU64Insts() const { return HasAddSubU64Insts; }
+ // \returns true if the target has V_MAD_U32 instruction.
+ bool hasMadU32Inst() const { return HasMadU32Inst; }
+
// \returns true if the target has V_MUL_U64/V_MUL_I64 instructions.
bool hasVectorMulU64() const { return GFX1250Insts; }
+ // \returns true if the target has V_MAD_NC_U64_U32/V_MAD_NC_I64_I32
+ // instructions.
+ bool hasMadU64U32NoCarry() const { return GFX1250Insts; }
+
+ // \returns true if the target has V_{MIN|MAX}_{I|U}64 instructions.
+ bool hasIntMinMax64() const { return GFX1250Insts; }
+
+ // \returns true if the target has V_ADD_{MIN|MAX}_{I|U}32 instructions.
+ bool hasAddMinMaxInsts() const { return GFX1250Insts; }
+
// \returns true if the target has V_PK_ADD_{MIN|MAX}_{I|U}16 instructions.
bool hasPkAddMinMaxInsts() const { return GFX1250Insts; }
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index 11b072e..42c4d8b 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -540,6 +540,8 @@ void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm, uint8_t OpType,
printImmediateBFloat16(static_cast<uint16_t>(Imm), STI, O))
return;
break;
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ break;
default:
llvm_unreachable("bad operand type");
}
@@ -770,6 +772,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
@@ -1790,4 +1793,14 @@ void AMDGPUInstPrinter::printBitOp3(const MCInst *MI, unsigned OpNo,
O << formatHex(static_cast<uint64_t>(Imm));
}
+void AMDGPUInstPrinter::printScaleSel(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ uint8_t Imm = MI->getOperand(OpNo).getImm();
+ if (!Imm)
+ return;
+
+ O << " scale_sel:" << formatDec(Imm);
+}
+
#include "AMDGPUGenAsmWriter.inc"
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
index e0b7aa5..f6739b14 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
@@ -173,6 +173,8 @@ private:
const MCSubtargetInfo &STI, raw_ostream &O,
StringRef Prefix, bool PrintInHex, bool AlwaysPrint);
+ void printScaleSel(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
void printBitOp3(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index c49ad79..f358084 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -341,6 +341,9 @@ std::optional<uint64_t> AMDGPUMCCodeEmitter::getLitEncoding(
return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
.value_or(255);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return 255;
+
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM16:
case AMDGPU::OPERAND_KIMM64:
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 40b8bcd..c564145 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -208,6 +208,7 @@ enum OperandType : unsigned {
OPERAND_REG_IMM_V2BF16,
OPERAND_REG_IMM_V2FP16,
OPERAND_REG_IMM_V2INT16,
+ OPERAND_REG_IMM_NOINLINE_V2FP16,
OPERAND_REG_IMM_V2INT32,
OPERAND_REG_IMM_V2FP32,
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index b77da4d..e934152 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -468,6 +468,7 @@ bool SIFoldOperandsImpl::canUseImmWithOpSel(const MachineInstr *MI,
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9017f4f..ad26757 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -909,6 +909,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
Custom);
}
+ if (Subtarget->hasIntMinMax64())
+ setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i64,
+ Legal);
+
setOperationAction(ISD::INTRINSIC_WO_CHAIN,
{MVT::Other, MVT::f32, MVT::v4f32, MVT::i16, MVT::f16,
MVT::bf16, MVT::v2i16, MVT::v2f16, MVT::v2bf16, MVT::i128,
@@ -1256,6 +1260,25 @@ MVT SITargetLowering::getPointerMemTy(const DataLayout &DL, unsigned AS) const {
return AMDGPUTargetLowering::getPointerMemTy(DL, AS);
}
+static unsigned getIntrMemWidth(unsigned IntrID) {
+ switch (IntrID) {
+ case Intrinsic::amdgcn_global_load_async_to_lds_b8:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b8:
+ return 8;
+ case Intrinsic::amdgcn_global_load_async_to_lds_b32:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b32:
+ return 32;
+ case Intrinsic::amdgcn_global_load_async_to_lds_b64:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b64:
+ return 64;
+ case Intrinsic::amdgcn_global_load_async_to_lds_b128:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b128:
+ return 128;
+ default:
+ llvm_unreachable("Unknown width");
+ }
+}
+
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &CI,
MachineFunction &MF,
@@ -1527,6 +1550,26 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags |= MachineMemOperand::MOStore;
return true;
}
+ case Intrinsic::amdgcn_global_load_async_to_lds_b8:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b32:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b64:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b128: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
+ Info.ptrVal = CI.getArgOperand(1);
+ Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ return true;
+ }
+ case Intrinsic::amdgcn_global_store_async_from_lds_b8:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b32:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b64:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b128: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
+ Info.ptrVal = CI.getArgOperand(0);
+ Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ return true;
+ }
case Intrinsic::amdgcn_load_to_lds:
case Intrinsic::amdgcn_global_load_lds: {
Info.opc = ISD::INTRINSIC_VOID;
@@ -1623,10 +1666,18 @@ bool SITargetLowering::getAddrModeArguments(const IntrinsicInst *II,
case Intrinsic::amdgcn_global_load_tr_b128:
case Intrinsic::amdgcn_global_load_tr4_b64:
case Intrinsic::amdgcn_global_load_tr6_b96:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b8:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b32:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b64:
+ case Intrinsic::amdgcn_global_store_async_from_lds_b128:
Ptr = II->getArgOperand(0);
break;
case Intrinsic::amdgcn_load_to_lds:
case Intrinsic::amdgcn_global_load_lds:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b8:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b32:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b64:
+ case Intrinsic::amdgcn_global_load_async_to_lds_b128:
Ptr = II->getArgOperand(1);
break;
default:
@@ -4241,7 +4292,7 @@ SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Chain = BaseAddr.getValue(1);
Align StackAlign = TFL->getStackAlign();
if (Alignment > StackAlign) {
- uint64_t ScaledAlignment = (uint64_t)Alignment.value()
+ uint64_t ScaledAlignment = Alignment.value()
<< Subtarget->getWavefrontSizeLog2();
uint64_t StackAlignMask = ScaledAlignment - 1;
SDValue TmpAddr = DAG.getNode(ISD::ADD, dl, VT, BaseAddr,
@@ -7148,7 +7199,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
- if (getTargetMachine().Options.UnsafeFPMath) {
+ if (Op->getFlags().hasApproximateFuncs()) {
SDValue Flags = Op.getOperand(1);
SDValue Src32 = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, Src, Flags);
return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Src32, Flags);
@@ -11243,8 +11294,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
- bool AllowInaccurateRcp =
- Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
// Without !fpmath accuracy information, we can't do more because we don't
@@ -11263,7 +11313,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
// 1.0 / sqrt(x) -> rsq(x)
- // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+ // XXX - Is afn sufficient to do this for f64? The maximum ULP
// error seems really high at 2^29 ULP.
// 1.0 / x -> rcp(x)
return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
@@ -11297,8 +11347,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
- bool AllowInaccurateDiv =
- Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateDiv = Flags.hasApproximateFuncs();
if (!AllowInaccurateDiv)
return SDValue();
@@ -14550,7 +14599,7 @@ unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
return ISD::FMAD;
const TargetOptions &Options = DAG.getTarget().Options;
- if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+ if ((Options.AllowFPOpFusion == FPOpFusion::Fast ||
(N0->getFlags().hasAllowContract() &&
N1->getFlags().hasAllowContract())) &&
isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
@@ -15673,9 +15722,9 @@ SDValue SITargetLowering::performFMACombine(SDNode *N,
// fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
// regardless of the denorm mode setting. Therefore,
- // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
+ // fp-contract is sufficient to allow generating fdot2.
const TargetOptions &Options = DAG.getTarget().Options;
- if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+ if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
(N->getFlags().hasAllowContract() &&
FMA->getFlags().hasAllowContract())) {
Op1 = Op1.getOperand(0);
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 520c321..4b48fc4 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1380,6 +1380,20 @@ bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
Modified = true;
} else
WaitcntInstr = &II;
+ } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
+ assert(ST->hasVMemToLDSLoad());
+ LLVM_DEBUG(dbgs() << "Processing S_WAITCNT_lds_direct: " << II
+ << "Before: " << Wait.LoadCnt << '\n';);
+ ScoreBrackets.determineWait(LOAD_CNT, FIRST_LDS_VGPR, Wait);
+ LLVM_DEBUG(dbgs() << "After: " << Wait.LoadCnt << '\n';);
+
+ // It is possible (but unlikely) that this is the only wait instruction,
+ // in which case, we exit this loop without a WaitcntInstr to consume
+ // `Wait`. But that works because `Wait` was passed in by reference, and
+ // the callee eventually calls createNewWaitcnt on it. We test this
+ // possibility in an artificial MIR test since such a situation cannot be
+ // recreated by running the memory legalizer.
+ II.eraseFromParent();
} else {
assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
@@ -1551,6 +1565,11 @@ bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
ScoreBrackets.simplifyWaitcnt(OldWait);
Wait = Wait.combined(OldWait);
UpdatableInstr = &CombinedStoreDsCntInstr;
+ } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
+ // Architectures higher than GFX10 do not have direct loads to
+ // LDS, so no work is required here yet.
+ II.eraseFromParent();
+ continue;
} else {
std::optional<InstCounterType> CT = counterTypeForInstr(Opcode);
assert(CT.has_value());
@@ -2415,6 +2434,7 @@ static bool isWaitInstr(MachineInstr &Inst) {
Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL) ||
Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
+ Opcode == AMDGPU::S_WAITCNT_lds_direct ||
counterTypeForInstr(Opcode).has_value();
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 2aa6b4e..044a681 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4438,6 +4438,8 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
return AMDGPU::isInlinableLiteralV2BF16(Imm);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return false;
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
if (isInt<16>(Imm) || isUInt<16>(Imm)) {
@@ -9281,6 +9283,16 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
default:
if (MI.isMetaInstruction())
return 0;
+
+ // If this is a D16 pseudo instruction, get the correct MC code size
+ const auto *D16Info = AMDGPU::getT16D16Helper(Opc);
+ if (D16Info) {
+ // Assume the d16_lo/hi instructions are always the same size
+ unsigned LoInstOpcode = D16Info->LoOp;
+ const MCInstrDesc &Desc = getMCOpcodeFromPseudo(LoInstOpcode);
+ DescSize = Desc.getSize();
+ }
+
return DescSize;
}
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 83b0490..a3e20ba 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1313,6 +1313,10 @@ def MatrixBFMT : CustomOperand<i32, 1, "MatrixBFMT">;
def MatrixAReuse : NamedBitOperand<"matrix_a_reuse">;
def MatrixBReuse : NamedBitOperand<"matrix_b_reuse">;
+def ScaleSel : NamedIntOperand<"scale_sel"> {
+ let Validator = "isUInt<3>";
+}
+
class KImmFPOperand<ValueType vt> : ImmOperand<vt> {
let OperandNamespace = "AMDGPU";
let OperandType = "OPERAND_KIMM"#vt.Size;
@@ -2859,6 +2863,7 @@ def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
def VOP_I16_I16 : VOPProfile <[i16, i16, untyped, untyped]>;
def VOP_BF16_BF16 : VOPProfile<[bf16, bf16, untyped, untyped]>;
def VOP1_I16_I32 : VOPProfile<[i16, i32, untyped, untyped]>;
+def VOP_I16_V2F16 : VOPProfile<[i16, v2f16, untyped, untyped]>;
def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
@@ -2926,6 +2931,8 @@ def VOP_V2BF16_F32_F32 : VOPProfile <[v2bf16, f32, f32, untyped]>;
def VOP_V32F32_V6I32_F32 : VOPProfile <[v32f32, v6i32, f32, untyped]>;
def VOP_V32F16_V6I32_F32 : VOPProfile <[v32f16, v6i32, f32, untyped]>;
def VOP_V32BF16_V6I32_F32 : VOPProfile <[v32bf16, v6i32, f32, untyped]>;
+def VOP_V2BF16_F32_F32_I32 : VOPProfile <[v2bf16, f32, f32, i32]>;
+def VOP_V2F16_F32_F32_I32 : VOPProfile <[v2f16, f32, f32, i32]>;
def VOP_V6I32_V32F16_F32 : VOPProfile<[v6i32, v32f16, f32, untyped]>;
def VOP_V6I32_V32BF16_F32 : VOPProfile<[v6i32, v32bf16, f32, untyped]>;
def VOP_V6I32_V16F32_V16F32_F32 : VOPProfile<[v6i32, v16f32, v16f32, f32]>;
@@ -2941,6 +2948,13 @@ def VOP_BF16_F32_I32 : VOPProfile<[bf16, f32, i32, untyped]>;
def VOP_F16_F32_I32 : VOPProfile<[f16, f32, i32, untyped]>;
def VOP_I32_BF16_I32_F32 : VOPProfile<[i32, bf16, i32, f32]>;
def VOP_I32_F16_I32_F32 : VOPProfile<[i32, f16, i32, f32]>;
+def VOP_V8F16_V2I32_I32 : VOPProfile<[v8f16, v2i32, i32, untyped]>;
+def VOP_V8BF16_V2I32_I32 : VOPProfile<[v8bf16, v2i32, i32, untyped]>;
+def VOP_V8F16_I32_I32 : VOPProfile<[v8f16, i32, i32, untyped]>;
+def VOP_V8BF16_I32_I32 : VOPProfile<[v8bf16, i32, i32, untyped]>;
+def VOP_V16F32_V3I32_I32 : VOPProfile<[v16f32, v3i32, i32, untyped]>;
+def VOP_V8F32_V2I32_I32 : VOPProfile<[v8f32, v2i32, i32, untyped]>;
+def VOP_V8F32_I32_I32 : VOPProfile<[v8f32, i32, i32, untyped]>;
def VOP_I32_F32_I32_F32 : VOPProfile<[i32, f32, i32, f32]>;
def VOP_V6I32_V32BF16_I32_F32 : VOPProfile<[v6i32, v32bf16, i32, f32]>;
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 025731a..53f554e 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -1170,6 +1170,16 @@ bool SIGfx6CacheControl::insertWait(MachineBasicBlock::iterator &MI,
Changed = true;
}
+ // On architectures that support direct loads to LDS, emit an unknown waitcnt
+ // at workgroup-scoped release operations that specify the LDS address space.
+ // SIInsertWaitcnts will later replace this with a vmcnt().
+ if (ST.hasVMemToLDSLoad() && isReleaseOrStronger(Order) &&
+ Scope == SIAtomicScope::WORKGROUP &&
+ (AddrSpace & SIAtomicAddrSpace::LDS) != SIAtomicAddrSpace::NONE) {
+ BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT_lds_direct));
+ Changed = true;
+ }
+
if (Pos == Position::AFTER)
--MI;
@@ -2078,6 +2088,16 @@ bool SIGfx10CacheControl::insertWait(MachineBasicBlock::iterator &MI,
Changed = true;
}
+ // On architectures that support direct loads to LDS, emit an unknown waitcnt
+ // at workgroup-scoped release operations that specify the LDS address space.
+ // SIInsertWaitcnts will later replace this with a vmcnt().
+ if (ST.hasVMemToLDSLoad() && isReleaseOrStronger(Order) &&
+ Scope == SIAtomicScope::WORKGROUP &&
+ (AddrSpace & SIAtomicAddrSpace::LDS) != SIAtomicAddrSpace::NONE) {
+ BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT_lds_direct));
+ Changed = true;
+ }
+
if (VSCnt) {
BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT_soft))
.addReg(AMDGPU::SGPR_NULL, RegState::Undef)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 218841d..36d1a3b 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1218,6 +1218,8 @@ def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_FP64"> {
def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2INT32">;
def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2FP32">;
+def VSrc_NoInline_v2f16 : SrcRegOrImm9 <VS_32, "OPERAND_REG_IMM_NOINLINE_V2FP16">;
+
//===----------------------------------------------------------------------===//
// VRegSrc_* Operands with a VGPR
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index e103ccc..8303410 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1621,6 +1621,13 @@ let OtherPredicates = [HasImageInsts] in {
def S_WAIT_KMCNT_soft : SOPP_Pseudo <"s_soft_wait_kmcnt", (ins s16imm:$simm16), "$simm16">;
}
+// Represents the point at which a wave must wait for all outstanding direct loads to LDS.
+// Typically inserted by the memory legalizer and consumed by SIInsertWaitcnts.
+
+def S_WAITCNT_lds_direct : SPseudoInstSI<(outs), (ins)> {
+ let hasSideEffects = 0;
+}
+
def S_SETHALT : SOPP_Pseudo <"s_sethalt" , (ins i32imm:$simm16), "$simm16",
[(int_amdgcn_s_sethalt timm:$simm16)]>;
def S_SETKILL : SOPP_Pseudo <"s_setkill" , (ins i16imm:$simm16), "$simm16">;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 83e63ac..5827f18 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -2659,6 +2659,7 @@ bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
@@ -3023,6 +3024,8 @@ bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
return isInlinableLiteralV2BF16(Literal);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return false;
default:
llvm_unreachable("bad packed operand type");
}
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index c09a9d6..74d59f4 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1636,6 +1636,7 @@ inline unsigned getOperandSize(const MCOperandInfo &OpInfo) {
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
return 2;
default:
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 550ec9d..9de7d6d 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -1344,6 +1344,8 @@ def V_FMAAK_F64 : VOP2_Pseudo<"v_fmaak_f64", VOP_MADAK_F64, [], "">;
} // End SubtargetPredicate = HasFmaakFmamkF64Insts, isReMaterializable = 1, FixedSize = 1, Size = 12, SchedRW = [Write64Bit]
let SubtargetPredicate = HasPkFmacF16Inst in {
+// FIXME: V_PK_FMAC_F16 is currently not used in instruction selection.
+// If this changes, ensure the DPP variant is not used for GFX11+.
defm V_PK_FMAC_F16 : VOP2Inst<"v_pk_fmac_f16", VOP_V2F16_V2F16_V2F16>;
} // End SubtargetPredicate = HasPkFmacF16Inst
@@ -1904,7 +1906,7 @@ multiclass VOP2_Real_FULL_with_name_gfx11_gfx12<bits<6> op, string opName,
VOP2_Real_FULL_with_name<GFX12Gen, op, opName, asmName>;
multiclass VOP2_Real_e32_gfx11_gfx12<bits<6> op> :
- VOP2Only_Real<GFX11Gen, op>, VOP2Only_Real<GFX12Gen, op>;
+ VOP2Only_Real_e32<GFX11Gen, op>, VOP2Only_Real_e32<GFX12Gen, op>;
multiclass VOP3Only_Realtriple_gfx11_gfx12<bits<10> op> :
VOP3Only_Realtriple<GFX11Gen, op>, VOP3Only_Realtriple<GFX12Gen, op>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index b6f9568..1ffe39d 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -32,9 +32,10 @@ class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
let HasExtDPP = 0;
}
-let HasExt64BitDPP = 1 in {
-def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32>;
-def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64>;
+def DIV_FIXUP_F32_PROF : VOP3_Profile<VOP_F32_F32_F32_F32> {
+ let HasExtVOP3DPP = 0;
+ let HasExtDPP = 0;
+}
def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
let HasClamp = 1;
@@ -44,6 +45,10 @@ def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
let Asm64 = "$vdst, $sdst, $src0, $src1, $src2$clamp";
}
+let HasExt64BitDPP = 1 in {
+def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32>;
+def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64>;
+
class V_MUL_PROF<VOPProfile P> : VOP3_Profile<P> {
let HasExtVOP3DPP = 0;
let HasExtDPP = 0;
@@ -52,10 +57,13 @@ class V_MUL_PROF<VOPProfile P> : VOP3_Profile<P> {
def V_LSHL_ADD_U64_PROF : VOP3_Profile<VOP_I64_I64_I32_I64>;
def VOP_F64_F64_F64_F64_DPP_PROF : VOP3_Profile<VOP_F64_F64_F64_F64>;
-
-def DIV_FIXUP_F32_PROF : VOP3_Profile<VOP_F32_F32_F32_F32> {
+def V_MAD_U32_PROF: VOP3_Profile<VOP_I32_I32_I32_I32> {
let HasExtVOP3DPP = 0;
- let HasExtDPP = 0;
+ let HasExt64BitDPP = 1;
+}
+def VOP_I64_I64_I64_DPP : VOP3_Profile<VOP_I64_I64_I64>;
+def VOP_I32_I32_I64_DPP : VOP3_Profile<VOPProfile<[i64, i32, i32, i64]>> {
+ let HasClamp = 1;
}
} // End HasExt64BitDPP = 1;
@@ -152,6 +160,15 @@ defm V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32
defm V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, any_fma>, VOPD_Component<0x13, "v_fma_f32">;
defm V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>;
+let SchedRW = [WriteIntMul] in {
+ let SubtargetPredicate = HasMadU32Inst in
+ defm V_MAD_U32 : VOP3Inst <"v_mad_u32", V_MAD_U32_PROF>;
+ let SubtargetPredicate = isGFX1250Plus in {
+ defm V_MAD_NC_U64_U32 : VOP3Inst<"v_mad_nc_u64_u32", VOP_I32_I32_I64_DPP>;
+ defm V_MAD_NC_I64_I32 : VOP3Inst<"v_mad_nc_i64_i32", VOP_I32_I32_I64_DPP>;
+ }
+}
+
let SchedRW = [WriteDoubleAdd] in {
let FPDPRounding = 1 in {
defm V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP_F64_F64_F64_F64_DPP_PROF, any_fma>, VOPD_Component<0x20, "v_fma_f64">;
@@ -185,6 +202,13 @@ defm V_MAXIMUM_F64 : VOP3Inst <"v_maximum_f64", VOP3_Profile<VOP_F64_F64_F64>, f
} // End SchedRW = [WriteDoubleAdd]
} // End SubtargetPredicate = HasIEEEMinimumMaximumInsts, ReadsModeReg = 0, AddedComplexity = 1
+let SubtargetPredicate = isGFX1250Plus, SchedRW = [WriteDoubleAdd] in {
+defm V_MAX_I64 : VOP3Inst <"v_max_i64", VOP_I64_I64_I64_DPP, smax>;
+defm V_MAX_U64 : VOP3Inst <"v_max_u64", VOP_I64_I64_I64_DPP, umax>;
+defm V_MIN_I64 : VOP3Inst <"v_min_i64", VOP_I64_I64_I64_DPP, smin>;
+defm V_MIN_U64 : VOP3Inst <"v_min_u64", VOP_I64_I64_I64_DPP, umin>;
+} // End SubtargetPredicate = isGFX1250Plus, SchedRW = [WriteDoubleAdd]
+
} // End isReMaterializable = 1
let Uses = [MODE, VCC, EXEC] in {
@@ -601,8 +625,9 @@ def shl_0_to_4 : PatFrag<
}];
}
-def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_32:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile<bit _HasClamp = 0> : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_32:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -612,12 +637,13 @@ def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
-def VOP3_CVT_PK_F8_F32_Profile_fake16 : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_32:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile_fake16<bit _HasClamp = 0> : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_32:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -627,14 +653,15 @@ def VOP3_CVT_PK_F8_F32_Profile_fake16 : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
// This t16 profile with vdst_in operand is for backward compatibility and is used
// for user controlled packing
-def VOP3_CVT_PK_F8_F32_Profile_t16 : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_16:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile_t16<bit _HasClamp = 0> : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_16:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -644,7 +671,7 @@ def VOP3_CVT_PK_F8_F32_Profile_t16 : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_O
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
@@ -678,10 +705,10 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile<VOPProfile<[i32, f32, i32, f32]>,
HasModifiers, DstVT>.ret);
}
-class VOP3_CVT_SR_F8_ByteSel_Profile<ValueType SrcVT> :
+class VOP3_CVT_SR_F8_ByteSel_Profile<ValueType SrcVT, bit _HasClamp = 0> :
VOP3_Profile<VOPProfile<[i32, SrcVT, i32, untyped]>> {
let HasFP8DstByteSel = 1;
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
}
def IsPow2Plus1: PatLeaf<(i32 imm), [{
@@ -722,6 +749,13 @@ let SubtargetPredicate = HasMinimum3Maximum3F16, ReadsModeReg = 0 in {
defm V_MAXIMUM3_F16 : VOP3Inst_t16 <"v_maximum3_f16", VOP_F16_F16_F16_F16, AMDGPUfmaximum3>;
} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
+let SubtargetPredicate = HasAddMinMaxInsts, isCommutable = 1, isReMaterializable = 1 in {
+ defm V_ADD_MAX_I32 : VOP3Inst <"v_add_max_i32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MAX_U32 : VOP3Inst <"v_add_max_u32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MIN_I32 : VOP3Inst <"v_add_min_i32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MIN_U32 : VOP3Inst <"v_add_min_u32", VOP_I32_I32_I32_I32>;
+}
+
defm V_ADD_I16 : VOP3Inst_t16 <"v_add_i16", VOP_I16_I16_I16>;
defm V_SUB_I16 : VOP3Inst_t16 <"v_sub_i16", VOP_I16_I16_I16>;
@@ -749,15 +783,23 @@ defm V_LSHL_ADD_U64 : VOP3Inst <"v_lshl_add_u64", V_LSHL_ADD_U64_PROF>;
let OtherPredicates = [HasFP8ConversionInsts], mayRaiseFPException = 0,
SchedRW = [WriteFloatCvt] in {
let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in {
- defm V_CVT_PK_FP8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32", VOP3_CVT_PK_F8_F32_Profile,
- VOP3_CVT_PK_F8_F32_Profile_t16,
- VOP3_CVT_PK_F8_F32_Profile_fake16>;
- defm V_CVT_PK_BF8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f32", VOP3_CVT_PK_F8_F32_Profile,
- VOP3_CVT_PK_F8_F32_Profile_t16,
- VOP3_CVT_PK_F8_F32_Profile_fake16>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ defm V_CVT_PK_FP8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32", VOP3_CVT_PK_F8_F32_Profile<>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<>>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in
+ defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32_gfx1250", VOP3_CVT_PK_F8_F32_Profile<true>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<true>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<true>>;
+ defm V_CVT_PK_BF8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f32", VOP3_CVT_PK_F8_F32_Profile<>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<>>;
let SubtargetPredicate = isGFX12Plus in {
- defm V_CVT_SR_FP8_F32_gfx12 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ defm V_CVT_SR_FP8_F32_gfx12 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in
+ defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx1250", VOP3_CVT_SR_F8_ByteSel_Profile<f32, true>>;
defm V_CVT_SR_BF8_F32_gfx12 : VOP3Inst<"v_cvt_sr_bf8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
}
}
@@ -776,6 +818,11 @@ class Cvt_PK_F8_F32_Pat<SDPatternOperator node, int index, VOP3_Pseudo inst> : G
(inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, $old, 0)
>;
+class Cvt_PK_F8_F32_E5M3_Pat<SDPatternOperator node, int index, VOP3_Pseudo inst, int Clamp> : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, index)),
+ (inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, Clamp, $old, 0)
+>;
+
multiclass Cvt_PK_F8_F32_t16_Pat<SDPatternOperator node, VOP3_Pseudo inst> {
def : GCNPat<
(i32 (node f32:$src0, f32:$src1, i32:$old, -1)),
@@ -791,6 +838,21 @@ def : GCNPat<
>;
}
+multiclass Cvt_PK_F8_F32_E5M3_t16_Pat<SDPatternOperator node, VOP3_Pseudo inst, int Clamp> {
+def : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, -1)),
+ (REG_SEQUENCE VGPR_32,
+ (i16 (EXTRACT_SUBREG $old, lo16)), lo16,
+ (i16 (inst SRCMODS.DST_OP_SEL, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, hi16)), 0)), hi16)
+>;
+def : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, 0)),
+ (REG_SEQUENCE VGPR_32,
+ (i16 (inst 0, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, lo16)), 0)), lo16,
+ (i16 (EXTRACT_SUBREG $old, hi16)), hi16)
+>;
+}
+
class Cvt_SR_F8_F32_Pat<SDPatternOperator node, bits<2> index, VOP3_Pseudo inst> : GCNPat<
(i32 (node f32:$src0, i32:$src1, i32:$old, index)),
(inst !if(index{1}, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1,
@@ -803,21 +865,37 @@ class Cvt_SR_F8_ByteSel_Pat<SDPatternOperator node, VOP3_Pseudo inst, ValueType
(inst $src0_modifiers, $src0, $src1_modifiers, $src1, $old, (as_i32timm $byte_sel))
>;
+class Cvt_SR_F8_ByteSel_E5M3_Pat<SDPatternOperator node, VOP3_Pseudo inst,
+ ValueType SrcVT, int Clamp> : GCNPat<
+ (i32 (node (VOP3Mods SrcVT:$src0, i32:$src0_modifiers), (VOP3Mods i32:$src1, i32:$src1_modifiers),
+ i32:$old, timm:$byte_sel)),
+ (inst $src0_modifiers, $src0, $src1_modifiers, $src1, Clamp, $old, (as_i32timm $byte_sel))
+>;
+
let OtherPredicates = [HasFP8ConversionInsts] in {
foreach Index = [0, -1] in {
let True16Predicate = NotHasTrue16BitInsts in {
- def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_e64>;
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_bf8_f32, Index, V_CVT_PK_BF8_F32_e64>;
}
let True16Predicate = UseFakeTrue16Insts in {
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_fake16_e64>;
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_bf8_f32, Index, V_CVT_PK_BF8_F32_fake16_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ def : Cvt_PK_F8_F32_E5M3_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_gfx1250_fake16_e64, DSTCLAMP.NONE>;
+ def : Cvt_PK_F8_F32_E5M3_Pat<int_amdgcn_cvt_pk_fp8_f32_e5m3, Index, V_CVT_PK_FP8_F32_gfx1250_fake16_e64, DSTCLAMP.ENABLE>;
+ }
}
}
let True16Predicate = UseRealTrue16Insts in {
defm : Cvt_PK_F8_F32_t16_Pat<int_amdgcn_cvt_pk_fp8_f32, V_CVT_PK_FP8_F32_t16_e64>;
defm : Cvt_PK_F8_F32_t16_Pat<int_amdgcn_cvt_pk_bf8_f32, V_CVT_PK_BF8_F32_t16_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ defm : Cvt_PK_F8_F32_E5M3_t16_Pat<int_amdgcn_cvt_pk_fp8_f32, V_CVT_PK_FP8_F32_gfx1250_t16_e64, DSTCLAMP.NONE>;
+ defm : Cvt_PK_F8_F32_E5M3_t16_Pat<int_amdgcn_cvt_pk_fp8_f32_e5m3, V_CVT_PK_FP8_F32_gfx1250_t16_e64, DSTCLAMP.ENABLE>;
+ }
}
let SubtargetPredicate = isGFX940Plus in {
@@ -828,7 +906,12 @@ let SubtargetPredicate = isGFX940Plus in {
}
let SubtargetPredicate = isGFX12Plus in {
- def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx12_e64, f32>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx12_e64, f32>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ def : Cvt_SR_F8_ByteSel_E5M3_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx1250_e64, f32, DSTCLAMP.NONE>;
+ def : Cvt_SR_F8_ByteSel_E5M3_Pat<int_amdgcn_cvt_sr_fp8_f32_e5m3, V_CVT_SR_FP8_F32_gfx1250_e64, f32, DSTCLAMP.ENABLE>;
+ }
def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f32, V_CVT_SR_BF8_F32_gfx12_e64, f32>;
}
}
@@ -848,6 +931,9 @@ def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32_e64>;
def : ThreeOp_i32_Pats<or, or, V_OR3_B32_e64>;
def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;
+let SubtargetPredicate = HasMadU32Inst, AddedComplexity = 10 in
+ def : ThreeOp_i32_Pats<mul, add, V_MAD_U32_e64>;
+
def : GCNPat<
(DivergentBinFrag<mul> i32:$src0, IsPow2Plus1:$src1),
(V_LSHL_ADD_U32_e64 i32:$src0, (i32 (Log2_32 imm:$src1)), i32:$src0)>;
@@ -858,6 +944,13 @@ def : GCNPat<
(V_LSHL_ADD_U64_e64 VSrc_b64:$src0, VSrc_b32:$src1, VSrc_b64:$src2)
>;
+let SubtargetPredicate = HasAddMinMaxInsts in {
+def : ThreeOp_i32_Pats<add, smax, V_ADD_MAX_I32_e64>;
+def : ThreeOp_i32_Pats<add, umax, V_ADD_MAX_U32_e64>;
+def : ThreeOp_i32_Pats<add, smin, V_ADD_MIN_I32_e64>;
+def : ThreeOp_i32_Pats<add, umin, V_ADD_MIN_U32_e64>;
+}
+
def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;
@@ -972,10 +1065,12 @@ class SrcAndDstSelToOpSelXForm<int modifier_idx, bit dest_sel> : SDNodeXForm<tim
unsigned Val = N->getZExtValue();
unsigned New = 0;
if (}] # modifier_idx # [{ == 0) {
- New = (}] # dest_sel # [{ == 1) ? ((Val & 0x2) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL) : SISrcMods::DST_OP_SEL)
- : ((Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE);
- } else if (}] # modifier_idx # [{== 1 || }] # modifier_idx # [{ == 2) {
- New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
+ New = (}] # dest_sel # [{ == 1) ? ((Val & 0x1) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL) : SISrcMods::DST_OP_SEL)
+ : ((Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE);
+  } else if (}] # modifier_idx # [{ == 1) {
+    New = (Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
+  } else if (}] # modifier_idx # [{ == 2) {
+    New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
}
return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32);
}]>;
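After this change, SrcAndDstSelToOpSelXForm keys modifier 0 (and the destination) off bit 0 of the packed sel immediate and modifier 1 off bit 1, with modifier 2 still on bit 0. A minimal C++ model of the new mapping, using placeholder SISrcMods values (illustrative, not the real flag encodings):

    // Placeholder flag values standing in for the real SISrcMods constants.
    namespace SISrcMods {
    constexpr unsigned NONE = 0;
    constexpr unsigned OP_SEL_0 = 1u << 2;
    constexpr unsigned DST_OP_SEL = 1u << 3;
    } // namespace SISrcMods

    // Models the updated transform: Val is the packed src/dst sel immediate.
    unsigned srcAndDstSelToOpSel(unsigned Val, int ModifierIdx, bool DestSel) {
      unsigned New = 0;
      if (ModifierIdx == 0) {
        New = DestSel ? ((Val & 0x1) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL)
                                     : SISrcMods::DST_OP_SEL)
                      : ((Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE);
      } else if (ModifierIdx == 1) {
        New = (Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
      } else if (ModifierIdx == 2) {
        New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
      }
      return New;
    }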
@@ -1427,34 +1522,72 @@ let SubtargetPredicate = isGFX12Plus in {
} // End SubtargetPredicate = isGFX12Plus
-let SubtargetPredicate = HasBitOp3Insts in {
+let HasClamp = 0, HasModifiers = 1 in {
+def BitOp3_B16_Profile : VOP3_BITOP3_Profile<VOPProfile <[i16, i16, i16, i16, i32]>, VOP3_OPSEL>;
+def BitOp3_B16_t16_Profile : VOP3_Profile_True16<BitOp3_B16_Profile>;
+def BitOp3_B16_fake16_Profile : VOP3_Profile_Fake16<BitOp3_B16_Profile>;
+}
+
+let OtherPredicates = [HasBitOp3Insts] in {
let isReMaterializable = 1 in {
- defm V_BITOP3_B16 : VOP3Inst <"v_bitop3_b16",
- VOP3_BITOP3_Profile<VOPProfile <[i16, i16, i16, i16, i32]>, VOP3_OPSEL>>;
+ let SubtargetPredicate = isGFX940Plus in
+ defm V_BITOP3_B16 : VOP3Inst <"v_bitop3_b16", BitOp3_B16_Profile>;
+ let SubtargetPredicate = isGFX1250Plus in
+ defm V_BITOP3_B16_gfx1250 : VOP3Inst_t16_with_profiles <"v_bitop3_b16_gfx1250", BitOp3_B16_Profile,
+ BitOp3_B16_t16_Profile, BitOp3_B16_fake16_Profile>;
defm V_BITOP3_B32 : VOP3Inst <"v_bitop3_b32",
VOP3_BITOP3_Profile<VOPProfile <[i32, i32, i32, i32, i32]>, VOP3_REGULAR>>,
VOPD_Component<0x12, "v_bitop2_b32">;
}
+
def : GCNPat<
(i32 (int_amdgcn_bitop3 i32:$src0, i32:$src1, i32:$src2, i32:$bitop3)),
(i32 (V_BITOP3_B32_e64 VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2, timm:$bitop3))
>;
def : GCNPat<
- (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
- (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
- >;
-
- def : GCNPat<
(i32 (BITOP3_32 i32:$src0, i32:$src1, i32:$src2, i32:$bitop3)),
(i32 (V_BITOP3_B32_e64 VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2, timm:$bitop3))
>;
- def : GCNPat<
- (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
- (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
- >;
-} // End SubtargetPredicate = HasBitOp3Insts
+ let SubtargetPredicate = isGFX940Plus in {
+ def : GCNPat<
+ (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
+ >;
+
+ def : GCNPat<
+ (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
+ >;
+ } // End SubtargetPredicate = isGFX940Plus
+
+ let SubtargetPredicate = isGFX1250Plus in {
+ let True16Predicate = UseFakeTrue16Insts in {
+ def : GCNPat<
+ (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_gfx1250_fake16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
+ >;
+
+ def : GCNPat<
+ (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_gfx1250_fake16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0))
+ >;
+ }
+ let True16Predicate = UseRealTrue16Insts in {
+ def : GCNPat<
+ (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_gfx1250_t16_e64 0, VSrcT_b16:$src0, 0, VSrcT_b16:$src1, 0, VSrcT_b16:$src2, timm:$bitop3, 0))
+ >;
+
+ def : GCNPat<
+ (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)),
+ (i16 (V_BITOP3_B16_gfx1250_t16_e64 0, VSrcT_b16:$src0, 0, VSrcT_b16:$src1, 0, VSrcT_b16:$src2, timm:$bitop3, 0))
+ >;
+ }
+ } // End SubtargetPredicate = isGFX1250Plus
+
+} // End OtherPredicates = [HasBitOp3Insts]
class DivFmasPat<ValueType vt, Instruction inst, Register CondReg> : GCNPat<
(AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
@@ -1531,6 +1664,7 @@ def bf16_fpround : PatFrag <(ops node:$src0), (fpround $src0), [{ return true;
let SubtargetPredicate = HasBF16ConversionInsts in {
let ReadsModeReg = 0 in {
defm V_CVT_PK_BF16_F32 : VOP3Inst<"v_cvt_pk_bf16_f32", VOP3_Profile<VOP_V2BF16_F32_F32>>;
+ defm V_CVT_SR_PK_BF16_F32 : VOP3Inst<"v_cvt_sr_pk_bf16_f32", VOP3_Profile<VOP_V2BF16_F32_F32_I32>, int_amdgcn_cvt_sr_pk_bf16_f32>;
}
def : GCNPat<(v2bf16 (bf16_fpround v2f32:$src)),
(V_CVT_PK_BF16_F32_e64 0, (EXTRACT_SUBREG VReg_64:$src, sub0), 0, (EXTRACT_SUBREG VReg_64:$src, sub1))>;
@@ -1541,6 +1675,85 @@ let SubtargetPredicate = HasBF16ConversionInsts in {
(V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, 0, (f32 (IMPLICIT_DEF)))>;
}
+class VOP3_CVT_SCALE_PK_F16_F864_Profile<VOPProfile P> : VOP3_CVT_SCALEF32_PK_F864_Profile<P> {
+ let Src0RC64 = getVOP3VRegSrcForVT<Src0VT>.ret;
+ let Ins64 = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
+ HasClamp, HasModifiers, HasSrc2Mods,
+ HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
+ (ins ScaleSel:$scale_sel));
+ let Asm64 = getAsmVOP3Base<NumSrcArgs, HasDst, HasClamp,
+ HasOpSel, HasOMod, IsVOP3P, HasNeg, HasSrc0Mods, HasSrc1Mods,
+ HasSrc2Mods, DstVT>.ret # "$scale_sel";
+}
+
+multiclass VOP3CvtScaleSelInst<string OpName, VOPProfile P, SDPatternOperator node> {
+ def _e64 : VOP3InstBase<OpName, VOP3_CVT_SCALE_PK_F16_F864_Profile<P>> {
+ let Pattern = [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0)), i32:$src1, i32:$scale_sel))];
+ }
+}
+
+let Src0RC64 = VSrc_NoInline_v2f16 in {
+def VOP3_CVT_PK_F8_F16_Profile : VOP3_Profile<VOP_I16_V2F16>;
+def VOP3_CVT_PK_F8_F16_True16_Profile : VOP3_Profile_True16<VOP3_CVT_PK_F8_F16_Profile>;
+def VOP3_CVT_PK_F8_F16_Fake16_Profile : VOP3_Profile_Fake16<VOP3_CVT_PK_F8_F16_Profile>;
+}
+
+let ReadsModeReg = 0, IsPacked = 0, SubtargetPredicate = isGFX125xOnly in {
+ defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f16_gfx1250",
+ VOP3_CVT_PK_F8_F16_Profile,
+ VOP3_CVT_PK_F8_F16_True16_Profile,
+ VOP3_CVT_PK_F8_F16_Fake16_Profile,
+ int_amdgcn_cvt_pk_fp8_f16>;
+ defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f16_gfx1250",
+ VOP3_CVT_PK_F8_F16_Profile,
+ VOP3_CVT_PK_F8_F16_True16_Profile,
+ VOP3_CVT_PK_F8_F16_Fake16_Profile,
+ int_amdgcn_cvt_pk_bf8_f16>;
+}
+
+let HasClamp = 0, HasOpSel = 1 in {
+def VOP3_CVT_SR_F8_F16_Profile : VOP3_CVT_SR_F8_ByteSel_Profile<f16>;
+def VOP3_CVT_SR_F8_F16_True16_Profile : VOP3_Profile_True16<VOP3_CVT_SR_F8_F16_Profile>;
+def VOP3_CVT_SR_F8_F16_Fake16_Profile : VOP3_Profile_Fake16<VOP3_CVT_SR_F8_F16_Profile>;
+}
+
+let SubtargetPredicate = isGFX1250Plus in {
+ let ReadsModeReg = 0 in {
+ defm V_CVT_SR_PK_F16_F32 : VOP3Inst<"v_cvt_sr_pk_f16_f32", VOP3_Profile<VOP_V2F16_F32_F32_I32>, int_amdgcn_cvt_sr_pk_f16_f32>;
+
+    // These instructions make non-standard use of op_sel: bits 2 and 3 of
+    // op_sel select a byte in vdst, and bits 0 and 1 are unused.
+ let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in {
+ defm V_CVT_SR_FP8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_fp8_f16", VOP3_CVT_SR_F8_F16_Profile,
+ VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>;
+ defm V_CVT_SR_BF8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_bf8_f16", VOP3_CVT_SR_F8_F16_Profile,
+ VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>;
+ }
+
+ let Constraints = "@earlyclobber $vdst" in {
+ defm V_CVT_SCALE_PK8_F16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp8>;
+ defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp8>;
+ defm V_CVT_SCALE_PK8_F16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_bf8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_bf8>;
+ defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_bf8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_bf8>;
+ defm V_CVT_SCALE_PK8_F32_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp8>;
+ defm V_CVT_SCALE_PK8_F32_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_bf8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_bf8>;
+ } // End Constraints = "@earlyclobber $vdst"
+
+ defm V_CVT_SCALE_PK8_F16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp4", VOP_V8F16_I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp4>;
+ defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp4", VOP_V8BF16_I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp4>;
+ defm V_CVT_SCALE_PK8_F32_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp4", VOP_V8F32_I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp4>;
+ } // End ReadsModeReg = 0
+
+ let True16Predicate = UseRealTrue16Insts in {
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f16, V_CVT_SR_FP8_F16_t16_e64, f16>;
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f16, V_CVT_SR_BF8_F16_t16_e64, f16>;
+ }
+ let True16Predicate = UseFakeTrue16Insts in {
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f16, V_CVT_SR_FP8_F16_fake16_e64, f16>;
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f16, V_CVT_SR_BF8_F16_fake16_e64, f16>;
+ }
+} // End SubtargetPredicate = isGFX1250Plus
+
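The non-standard op_sel use called out above (bits 2 and 3 select the destination byte, bits 0 and 1 unused) amounts to a byte insert into the tied vdst. A rough standalone model, assuming the converted FP8 value is a single byte:

    #include <cstdint>

    // Models v_cvt_sr_fp8_f16 / v_cvt_sr_bf8_f16 destination handling:
    // op_sel{3:2} picks which byte of the 32-bit vdst receives the converted
    // value, while the tied $vdst_in operand supplies the preserved bytes.
    uint32_t insertFp8Byte(uint32_t VdstIn, uint8_t Fp8, unsigned OpSel) {
      unsigned ByteSel = (OpSel >> 2) & 0x3;   // byte index from op_sel{3:2}
      unsigned Shift = ByteSel * 8;
      uint32_t Mask = ~(0xFFu << Shift);
      return (VdstIn & Mask) | (uint32_t(Fp8) << Shift);
    }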
class Cvt_Scale_Sr_F32ToBF16F16_Pat<SDPatternOperator node, VOP3_Pseudo inst, ValueType DstTy> : GCNPat<
(DstTy (node DstTy:$vdst_in, f32:$src0, i32:$src1, timm:$word_sel)),
(inst (DstSelToOpSelXForm $word_sel), $src0, 0, $src1, VGPR_32:$vdst_in)
@@ -1746,10 +1959,20 @@ defm V_MAXIMUM_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x368, "v_m
defm V_PERMLANE16_VAR_B32 : VOP3Only_Real_Base_gfx12<0x30f>;
defm V_PERMLANEX16_VAR_B32 : VOP3Only_Real_Base_gfx12<0x310>;
-defm V_CVT_PK_FP8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x369, "v_cvt_pk_fp8_f32">;
-defm V_CVT_PK_BF8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x36a, "v_cvt_pk_bf8_f32">;
-defm V_CVT_SR_FP8_F32_gfx12 : VOP3_Realtriple_with_name_gfx12<0x36b, "V_CVT_SR_FP8_F32_gfx12", "v_cvt_sr_fp8_f32" >;
-defm V_CVT_SR_BF8_F32_gfx12 : VOP3_Realtriple_with_name_gfx12<0x36c, "V_CVT_SR_BF8_F32_gfx12", "v_cvt_sr_bf8_f32">;
+defm V_BITOP3_B16_gfx1250 : VOP3_Real_BITOP3_t16_and_fake16_gfx1250<0x233, "v_bitop3_b16">;
+defm V_BITOP3_B32 : VOP3_Real_BITOP3_gfx1250<0x234>;
+
+defm V_MAD_U32 : VOP3Only_Realtriple_gfx1250<0x235>;
+defm V_MAD_NC_U64_U32 : VOP3Only_Realtriple_gfx1250<0x2fa>;
+defm V_MAD_NC_I64_I32 : VOP3Only_Realtriple_gfx1250<0x2fb>;
+defm V_MIN_U64 : VOP3Only_Realtriple_gfx1250<0x318>;
+defm V_MAX_U64 : VOP3Only_Realtriple_gfx1250<0x319>;
+defm V_MIN_I64 : VOP3Only_Realtriple_gfx1250<0x31a>;
+defm V_MAX_I64 : VOP3Only_Realtriple_gfx1250<0x31b>;
+defm V_ADD_MAX_I32 : VOP3Only_Realtriple_gfx1250<0x25e>;
+defm V_ADD_MAX_U32 : VOP3Only_Realtriple_gfx1250<0x25f>;
+defm V_ADD_MIN_I32 : VOP3Only_Realtriple_gfx1250<0x260>;
+defm V_ADD_MIN_U32 : VOP3Only_Realtriple_gfx1250<0x261>;
//===----------------------------------------------------------------------===//
// GFX11, GFX12
@@ -1911,6 +2134,13 @@ defm V_AND_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x36
defm V_OR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x363, "v_or_b16">;
defm V_XOR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x364, "v_xor_b16">;
+defm V_CVT_PK_FP8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12_not_gfx1250<0x369, "v_cvt_pk_fp8_f32">;
+defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x369, "v_cvt_pk_fp8_f32">;
+defm V_CVT_PK_BF8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x36a, "v_cvt_pk_bf8_f32">;
+defm V_CVT_SR_FP8_F32_gfx12 : VOP3_Realtriple_with_name_gfx11_gfx12_not_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx12", "v_cvt_sr_fp8_f32">;
+defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Only_Realtriple_with_name_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx1250", "v_cvt_sr_fp8_f32">;
+defm V_CVT_SR_BF8_F32_gfx12 : VOP3_Realtriple_with_name_gfx11_gfx12<0x36c, "V_CVT_SR_BF8_F32_gfx12", "v_cvt_sr_bf8_f32">;
+
let AssemblerPredicate = isGFX11Plus in {
def : AMDGPUMnemonicAlias<"v_add3_nc_u32", "v_add3_u32">;
def : AMDGPUMnemonicAlias<"v_xor_add_u32", "v_xad_u32">;
@@ -1918,7 +2148,25 @@ let AssemblerPredicate = isGFX11Plus in {
// These instructions differ from GFX12 variant by supporting DPP:
defm V_LSHL_ADD_U64 : VOP3Only_Realtriple_gfx1250<0x252>;
+defm V_ASHR_PK_I8_I32 : VOP3Only_Realtriple_gfx1250<0x290>;
+defm V_ASHR_PK_U8_I32 : VOP3Only_Realtriple_gfx1250<0x291>;
+defm V_CVT_SCALE_PK8_F16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x29f>;
+defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a0>;
+defm V_CVT_SCALE_PK8_F32_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a1>;
+defm V_CVT_SCALE_PK8_F16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a8>;
+defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a9>;
+defm V_CVT_SCALE_PK8_F32_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2aa>;
+defm V_CVT_SCALE_PK8_F16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ab>;
+defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ac>;
+defm V_CVT_SCALE_PK8_F32_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ad>;
defm V_CVT_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36d>;
+defm V_CVT_SR_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36e>;
+defm V_CVT_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x36f>;
+defm V_CVT_SR_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x370>;
+defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x372, "v_cvt_pk_fp8_f16">;
+defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x373, "v_cvt_pk_bf8_f16">;
+defm V_CVT_SR_FP8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x374>;
+defm V_CVT_SR_BF8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x375>;
//===----------------------------------------------------------------------===//
// GFX10.
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index c21e2d3..f027ab0 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -401,6 +401,26 @@ class VOP3Interp_vi <bits<10> op, VOPProfile P> : VOP3e_vi <op, P> {
let Inst{49-41} = src0;
}
+class VOP3a_BITOP3_gfx12<bits<10> op, VOPProfile p> : VOP3e_gfx11_gfx12<op, p> {
+ bits<8> bitop3;
+
+ let Inst{60-59} = bitop3{7-6};
+ let Inst{10-8} = bitop3{5-3};
+ let Inst{63-61} = bitop3{2-0};
+
+ let Inst{11} = !if(p.HasOpSel, src0_modifiers{2}, 0);
+ let Inst{12} = !if(p.HasOpSel, src1_modifiers{2}, 0);
+ let Inst{13} = !if(p.HasOpSel, src2_modifiers{2}, 0);
+ let Inst{14} = !if(p.HasOpSel, src0_modifiers{3}, 0);
+}
+
+class VOP3a_ScaleSel_gfx1250<bits<10> op, VOPProfile p> : VOP3e_gfx11_gfx12<op, p> {
+ bits<3> scale_sel;
+
+ let Inst{13-11} = scale_sel;
+ let Inst{14} = 0;
+}
+
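A small sketch of where the new scale_sel operand lands in the VOP3a_ScaleSel_gfx1250 encoding, purely as a bit-layout illustration (the real encoding is generated by TableGen from the record above):

    #include <cstdint>

    // Places the 3-bit scale_sel operand into bits 13..11 of the instruction
    // word and leaves bit 14 clear, matching Inst{13-11} = scale_sel; Inst{14} = 0.
    uint64_t encodeScaleSel(uint64_t Inst, unsigned ScaleSel) {
      Inst &= ~(0xFULL << 11);                  // clear bits 14..11
      Inst |= (uint64_t(ScaleSel & 0x7) << 11); // scale_sel -> Inst{13-11}
      return Inst;                              // Inst{14} stays 0
    }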
class VOP3Interp_gfx10<bits<10> op, VOPProfile p> : VOP3e_gfx10<op, p> {
bits<6> attr;
bits<2> attrchan;
@@ -1506,6 +1526,7 @@ class VOP3_Profile_Base<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VO
let HasFP8SrcByteSel = P.HasFP8SrcByteSel;
let HasFP8DstByteSel = P.HasFP8DstByteSel;
let HasOMod = P.HasOMod;
+ let HasBitOp3 = P.HasBitOp3;
let HasModifiers =
!if (Features.IsMAI, 0,
@@ -1525,6 +1546,7 @@ class VOP3_Profile_True16<VOPProfile P, VOP3Features Features = VOP3_REGULAR> :
let HasFP8SrcByteSel = P.HasFP8SrcByteSel;
let HasFP8DstByteSel = P.HasFP8DstByteSel;
let HasOMod = P.HasOMod;
+ let HasBitOp3 = P.HasBitOp3;
let HasModifiers =
!if (Features.IsMAI, 0,
@@ -1540,6 +1562,7 @@ class VOP3_Profile_Fake16<VOPProfile P, VOP3Features Features = VOP3_REGULAR> :
let HasFP8SrcByteSel = P.HasFP8SrcByteSel;
let HasFP8DstByteSel = P.HasFP8DstByteSel;
let HasOMod = P.HasOMod;
+ let HasBitOp3 = P.HasBitOp3;
let HasModifiers =
!if (Features.IsMAI, 0,
@@ -1723,6 +1746,34 @@ class VOP3b_DPP8_Base<bits<10> op, VOP_Pseudo ps, string opName = ps.OpName>
let Inst{14 - 8} = sdst;
}
+class VOP3_BITOP3_DPP16_Gen<bits<10> op, VOP_DPP_Pseudo p, GFXGen Gen, string asmName>
+ : VOP3_DPP16_Gen_t16<op, p, Gen, asmName> {
+ bits<8> bitop3;
+
+ let Inst{60-59} = bitop3{7-6};
+ let Inst{10-8} = bitop3{5-3};
+ let Inst{63-61} = bitop3{2-0};
+
+ let Inst{11} = !if(p.Pfl.HasOpSel, src0_modifiers{2}, 0);
+ let Inst{12} = !if(p.Pfl.HasOpSel, src1_modifiers{2}, 0);
+ let Inst{13} = !if(p.Pfl.HasOpSel, src2_modifiers{2}, 0);
+ let Inst{14} = !if(p.Pfl.HasOpSel, src0_modifiers{3}, 0);
+}
+
+class VOP3_BITOP3_DPP8<bits<10> op, VOP_Pseudo p, string asmName>
+ : Base_VOP3_DPP8_t16<op, p, asmName> {
+ bits<8> bitop3;
+
+ let Inst{60-59} = bitop3{7-6};
+ let Inst{10-8} = bitop3{5-3};
+ let Inst{63-61} = bitop3{2-0};
+
+ let Inst{11} = !if(p.Pfl.HasOpSel, src0_modifiers{2}, 0);
+ let Inst{12} = !if(p.Pfl.HasOpSel, src1_modifiers{2}, 0);
+ let Inst{13} = !if(p.Pfl.HasOpSel, src2_modifiers{2}, 0);
+ let Inst{14} = !if(p.Pfl.HasOpSel, src0_modifiers{3}, 0);
+}
+
class VOP3b_DPP8_Base_t16<bits<10> op, VOP_Pseudo ps, string opName = ps.OpName>
: Base_VOP3_DPP8<op, ps, opName> {
bits<8> sdst;
@@ -1943,6 +1994,53 @@ multiclass VOP3be_Realtriple<
multiclass VOP3beOnly_Realtriple<GFXGen Gen, bits<10> op> :
VOP3be_Realtriple<Gen, op, 1>;
+multiclass VOP3_BITOP3_Real_dpp_Base<GFXGen Gen, bits<10> op, string asmName> {
+ def _e64_dpp#Gen.Suffix :
+ VOP3_BITOP3_DPP16_Gen<op, !cast<VOP_DPP_Pseudo>(NAME#"_e64"#"_dpp"), Gen, asmName>;
+}
+
+multiclass VOP3_BITOP3_Real_dpp8_Base<GFXGen Gen, bits<10> op, string asmName> {
+ defvar ps = !cast<VOP3_Pseudo>(NAME#"_e64");
+ def _e64_dpp8#Gen.Suffix : VOP3_BITOP3_DPP8<op, ps, asmName> {
+ let DecoderNamespace =
+ Gen.DecoderNamespace #!if (ps.Pfl.IsRealTrue16, "", "_FAKE16");
+ let AssemblerPredicate = Gen.AssemblerPredicate;
+ }
+}
+
+multiclass VOP3_BITOP3_Real_Base<GFXGen Gen, bits<10> op, string asmName> {
+ defvar ps = !cast<VOP_Pseudo>(NAME#"_e64");
+ let IsSingle = ps.Pfl.IsSingle, AsmString = asmName # ps.AsmOperands in {
+ def _e64#Gen.Suffix :
+ VOP3_Real_Gen<ps, Gen>,
+ VOP3a_BITOP3_gfx12<op, ps.Pfl>;
+ }
+}
+
+multiclass VOP3Only_ScaleSel_Real_gfx1250<bits<10> op> {
+ defvar ps = !cast<VOP_Pseudo>(NAME#"_e64");
+ def _e64_gfx1250 :
+ VOP3_Real_Gen<ps, GFX1250Gen>,
+ VOP3a_ScaleSel_gfx1250<op, ps.Pfl>;
+}
+
+multiclass VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<bits<10> op, string asmName, string opName = NAME,
+ string pseudo_mnemonic = "", bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX11Gen, op, opName, asmName, pseudo_mnemonic, isSingle>,
+ VOP3_Realtriple_with_name<GFX12Not12_50Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
+multiclass VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12_not_gfx1250<bits<10> op, string asmName,
+ string opName = NAME, string pseudo_mnemonic = ""> {
+ defm _t16 : VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<op, asmName, opName#"_t16", pseudo_mnemonic, 1>;
+ defm _fake16 : VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<op, asmName, opName#"_fake16", pseudo_mnemonic, 1>;
+}
+
+multiclass VOP3_Realtriple_with_name_gfx11_gfx12_not_gfx1250<bits<10> op, string opName,
+ string asmName, string pseudo_mnemonic = "",
+ bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX11Gen, op, opName, asmName, pseudo_mnemonic, isSingle>,
+ VOP3_Realtriple_with_name<GFX12Not12_50Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
//===----------------------------------------------------------------------===//
// VOP3 GFX11
//===----------------------------------------------------------------------===//
@@ -2004,6 +2102,15 @@ multiclass VOP3Only_Real_Base_gfx1250<bits<10> op> :
multiclass VOP3Only_Realtriple_gfx1250<bits<10> op, bit isSingle = 0> :
VOP3_Realtriple<GFX1250Gen, op, isSingle>;
+multiclass VOP3Only_Realtriple_with_name_gfx1250<bits<10> op, string opName,
+ string asmName, string pseudo_mnemonic = "",
+ bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX1250Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
+multiclass VOP3Only_Realtriple_t16_gfx1250<bits<10> op, string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+ string opName = NAME, string pseudo_mnemonic = "", bit isSingle = 0> :
+ VOP3Only_Realtriple_with_name_gfx1250<op, opName, asmName, pseudo_mnemonic, isSingle>;
+
multiclass VOP3_Realtriple_t16_gfx12<bits<10> op, string asmName, string opName = NAME,
string pseudo_mnemonic = "", bit isSingle = 0> :
VOP3_Realtriple_with_name<GFX12Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
@@ -2024,6 +2131,13 @@ multiclass VOP3Only_Realtriple_t16_and_fake16_gfx12<bits<10> op, string asmName,
defm _fake16 : VOP3Only_Realtriple_t16_gfx12<op, asmName, opName#"_fake16", pseudo_mnemonic>;
}
+multiclass VOP3Only_Realtriple_t16_and_fake16_gfx1250<bits<10> op,
+ string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+ string opName = NAME, string pseudo_mnemonic = ""> {
+ defm _t16 : VOP3Only_Realtriple_t16_gfx1250<op, asmName, opName#"_t16", pseudo_mnemonic>;
+ defm _fake16 : VOP3Only_Realtriple_t16_gfx1250<op, asmName, opName#"_fake16", pseudo_mnemonic>;
+}
+
multiclass VOP3be_Real_with_name_gfx12<bits<10> op, string opName,
string asmName, bit isSingle = 0> {
defvar ps = !cast<VOP3_Pseudo>(opName#"_e64");
@@ -2046,6 +2160,16 @@ multiclass VOP3Only_Realtriple_with_name_gfx11_gfx12<bits<10> op, string opName,
VOP3Only_Realtriple_with_name<GFX11Gen, op, opName, asmName>,
VOP3Only_Realtriple_with_name<GFX12Gen, op, opName, asmName>;
+multiclass VOP3_Real_BITOP3_gfx1250<bits<10> op, string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> :
+ VOP3_BITOP3_Real_Base<GFX1250Gen, op, asmName>,
+ VOP3_BITOP3_Real_dpp_Base<GFX1250Gen, op, asmName>,
+ VOP3_BITOP3_Real_dpp8_Base<GFX1250Gen, op, asmName>;
+
+multiclass VOP3_Real_BITOP3_t16_and_fake16_gfx1250<bits<10> op, string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic> {
+ defm _t16 : VOP3_Real_BITOP3_gfx1250<op, asmName>;
+ defm _fake16: VOP3_Real_BITOP3_gfx1250<op, asmName>;
+}
+
multiclass VOP3Dot_Realtriple_gfx11_gfx12<bits<10> op, string asmName, bit isSingle = 0,
string opName = NAME> :
VOP3Dot_Realtriple<GFX11Gen, op, asmName, isSingle, opName>,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 066b392..bd4b75f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2423,6 +2423,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CallingConv::ID CallConv = CLI.CallConv;
bool doesNotRet = CLI.DoesNotReturn;
bool isVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2446,6 +2447,10 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
!Subtarget->noBTIAtReturnTwice())
GuardWithBTI = AFI->branchTargetEnforcement();
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
// Determine whether this is a non-secure function call.
if (CLI.CB && CLI.CB->getAttributes().hasFnAttr("cmse_nonsecure_call"))
isCmseNSCall = true;
diff --git a/llvm/lib/Target/DirectX/DXILRootSignature.cpp b/llvm/lib/Target/DirectX/DXILRootSignature.cpp
index ebdfcaa..a4f5086 100644
--- a/llvm/lib/Target/DirectX/DXILRootSignature.cpp
+++ b/llvm/lib/Target/DirectX/DXILRootSignature.cpp
@@ -17,7 +17,6 @@
#include "llvm/Analysis/DXILMetadataAnalysis.h"
#include "llvm/BinaryFormat/DXContainer.h"
#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
-#include "llvm/Frontend/HLSL/RootSignatureValidations.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
@@ -111,14 +110,25 @@ analyzeModule(Module &M) {
reportError(Ctx, "Root Element is not a metadata node.");
continue;
}
- mcdxbc::RootSignatureDesc RSD;
- if (std::optional<uint32_t> Version = extractMdIntValue(RSDefNode, 2))
- RSD.Version = *Version;
- else {
+ std::optional<uint32_t> V = extractMdIntValue(RSDefNode, 2);
+ if (!V.has_value()) {
reportError(Ctx, "Invalid RSDefNode value, expected constant int");
continue;
}
+ llvm::hlsl::rootsig::MetadataParser MDParser(RootElementListNode);
+ llvm::Expected<mcdxbc::RootSignatureDesc> RSDOrErr =
+ MDParser.ParseRootSignature(V.value());
+
+ if (!RSDOrErr) {
+ handleAllErrors(RSDOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ Ctx->emitError(EIB.message());
+ });
+ continue;
+ }
+
+ auto &RSD = *RSDOrErr;
+
// Clang emits the root signature data in dxcontainer following a specific
// sequence. First the header, then the root parameters. So the header
     // offset will always equal the header size.
@@ -127,12 +137,6 @@ analyzeModule(Module &M) {
     // static sampler offset is calculated when writing the dxcontainer.
RSD.StaticSamplersOffset = 0u;
- hlsl::rootsig::MetadataParser MDParser(RootElementListNode);
-
- if (MDParser.ParseRootSignature(Ctx, RSD)) {
- return RSDMap;
- }
-
RSDMap.insert(std::make_pair(F, RSD));
}
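This hunk switches the root-signature metadata parsing from fill-a-descriptor-and-return-bool to an llvm::Expected-returning parser whose errors are reported per function. A short sketch of that shape, where parseVersion and Desc are placeholders rather than the real RootSignatureMetadata API:

    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"
    #include <cstdint>

    namespace {
    struct Desc { uint32_t Version = 0; };

    // Placeholder parser: returns a value on success, an Error otherwise.
    llvm::Expected<Desc> parseVersion(uint32_t V) {
      if (V != 1 && V != 2)
        return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                       "unsupported root signature version");
      return Desc{V};
    }
    } // namespace

    bool tryParse(uint32_t V) {
      llvm::Expected<Desc> DOrErr = parseVersion(V);
      if (!DOrErr) {
        // As in the diff: report each contained error, then skip this entry
        // rather than aborting the whole module scan.
        llvm::handleAllErrors(DOrErr.takeError(), [](llvm::ErrorInfoBase &EIB) {
          llvm::errs() << EIB.message() << "\n";
        });
        return false;
      }
      return DOrErr->Version != 0;
    }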
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index d96136c..a5bf0e5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2621,9 +2621,38 @@ LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
- if (isa<ConstantSDNode>(Op->getOperand(2)))
+ MVT VT = Op.getSimpleValueType();
+ MVT EltVT = VT.getVectorElementType();
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
+ SDLoc DL(Op);
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ SDValue Op2 = Op.getOperand(2);
+
+ if (isa<ConstantSDNode>(Op2))
return Op;
- return SDValue();
+
+ MVT IdxTy = MVT::getIntegerVT(EltSizeInBits);
+ MVT IdxVTy = MVT::getVectorVT(IdxTy, NumElts);
+
+ if (!isTypeLegal(VT) || !isTypeLegal(IdxVTy))
+ return SDValue();
+
+ SDValue SplatElt = DAG.getSplatBuildVector(VT, DL, Op1);
+ SDValue SplatIdx = DAG.getSplatBuildVector(IdxVTy, DL, Op2);
+
+ SmallVector<SDValue, 32> RawIndices;
+ for (unsigned i = 0; i < NumElts; ++i)
+ RawIndices.push_back(DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+ SDValue Indices = DAG.getBuildVector(IdxVTy, DL, RawIndices);
+
+ // insert vec, elt, idx
+ // =>
+ // select (splatidx == {0,1,2...}) ? splatelt : vec
+ SDValue SelectCC =
+ DAG.getSetCC(DL, IdxVTy, SplatIdx, Indices, ISD::CondCode::SETEQ);
+ return DAG.getNode(ISD::VSELECT, DL, VT, SelectCC, SplatElt, Op0);
}
SDValue LoongArchTargetLowering::lowerATOMIC_FENCE(SDValue Op,
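The LoongArch lowering above replaces a variable-index element insert with a splat-and-select sequence: splat the element and the index, compare the splatted index against {0, 1, 2, ...}, and VSELECT between the splatted element and the original vector. A scalar model of the same idea, shown lane by lane:

    #include <array>
    #include <cstddef>

    // insert(vec, elt, idx) == select(splat(idx) == iota(), splat(elt), vec)
    template <typename T, std::size_t N>
    std::array<T, N> insertViaSelect(std::array<T, N> Vec, T Elt, std::size_t Idx) {
      for (std::size_t Lane = 0; Lane < N; ++Lane)
        Vec[Lane] = (Idx == Lane) ? Elt : Vec[Lane]; // per-lane VSELECT
      return Vec;
    }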
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 7a8395a..d9680c7 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -1034,14 +1034,12 @@ MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
void MipsTargetELFStreamer::emitGPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_GPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitGPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
   // fixup_Mips_GPREL32 designates R_MIPS_GPREL32+R_MIPS_64 on MIPS64.
S.addFixup(Value, Mips::fixup_Mips_GPREL32);
S.appendContents(8, 0);
@@ -1049,28 +1047,24 @@ void MipsTargetELFStreamer::emitGPRel64Value(const MCExpr *Value) {
void MipsTargetELFStreamer::emitDTPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_DTPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitDTPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
S.addFixup(Value, Mips::fixup_Mips_DTPREL64);
S.appendContents(8, 0);
}
void MipsTargetELFStreamer::emitTPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_TPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitTPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
S.addFixup(Value, Mips::fixup_Mips_TPREL64);
S.appendContents(8, 0);
}
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index ec6b382..881ba8e 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3341,6 +3341,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -3397,8 +3398,11 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Get a count of how many bytes are to be pushed on the stack.
unsigned StackSize = CCInfo.getStackSize();
- // Call site info for function parameters tracking.
+ // Call site info for function parameters tracking and call base type info.
MachineFunction::CallSiteInfo CSInfo;
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
// Check if it's really possible to do a tail call. Restrict it to functions
// that are part of this compilation unit.
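Both the ARM and Mips LowerCall changes attach call-site type information only for indirect calls and only when the call-graph section is requested. A trivial standalone model of that guard, where CallInfo is a placeholder rather than the real CallBase:

    // Direct callees need no runtime type id, so the extra CallSiteInfo is
    // recorded only for indirect calls under -emit-call-graph-section-style builds.
    struct CallInfo {
      bool IsIndirect = false;
    };

    bool shouldRecordCallSiteTypeId(bool EmitCallGraphSection, const CallInfo *CB) {
      return EmitCallGraphSection && CB && CB->IsIndirect;
    }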
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 8eec915..ee1ca45 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -391,16 +391,6 @@ void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum,
}
}
-void NVPTXInstPrinter::printOffseti32imm(const MCInst *MI, int OpNum,
- raw_ostream &O) {
- auto &Op = MI->getOperand(OpNum);
- assert(Op.isImm() && "Invalid operand");
- if (Op.getImm() != 0) {
- O << "+";
- printOperand(MI, OpNum, O);
- }
-}
-
void NVPTXInstPrinter::printHexu32imm(const MCInst *MI, int OpNum,
raw_ostream &O) {
int64_t Imm = MI->getOperand(OpNum).getImm();
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
index c3ff346..92155b0 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
@@ -46,7 +46,6 @@ public:
StringRef Modifier = {});
void printMemOperand(const MCInst *MI, int OpNum, raw_ostream &O,
StringRef Modifier = {});
- void printOffseti32imm(const MCInst *MI, int OpNum, raw_ostream &O);
void printHexu32imm(const MCInst *MI, int OpNum, raw_ostream &O);
void printProtoIdent(const MCInst *MI, int OpNum, raw_ostream &O);
void printPrmtMode(const MCInst *MI, int OpNum, raw_ostream &O);
diff --git a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
index cd40481..a349609 100644
--- a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
@@ -56,15 +56,12 @@ static bool traverseMoveUse(MachineInstr &U, const MachineRegisterInfo &MRI,
case NVPTX::LD_i16:
case NVPTX::LD_i32:
case NVPTX::LD_i64:
- case NVPTX::LD_i8:
case NVPTX::LDV_i16_v2:
case NVPTX::LDV_i16_v4:
case NVPTX::LDV_i32_v2:
case NVPTX::LDV_i32_v4:
case NVPTX::LDV_i64_v2:
- case NVPTX::LDV_i64_v4:
- case NVPTX::LDV_i8_v2:
- case NVPTX::LDV_i8_v4: {
+ case NVPTX::LDV_i64_v4: {
LoadInsts.push_back(&U);
return true;
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 96f52275..6068035 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -56,9 +56,7 @@ INITIALIZE_PASS(NVPTXDAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)
NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
CodeGenOptLevel OptLevel)
- : SelectionDAGISel(tm, OptLevel), TM(tm) {
- doMulWide = (OptLevel > CodeGenOptLevel::None);
-}
+ : SelectionDAGISel(tm, OptLevel), TM(tm) {}
bool NVPTXDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &MF.getSubtarget<NVPTXSubtarget>();
@@ -1005,14 +1003,10 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
// Helper function template to reduce amount of boilerplate code for
// opcode selection.
static std::optional<unsigned>
-pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i8,
- std::optional<unsigned> Opcode_i16,
+pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i16,
std::optional<unsigned> Opcode_i32,
std::optional<unsigned> Opcode_i64) {
switch (VT) {
- case MVT::i1:
- case MVT::i8:
- return Opcode_i8;
case MVT::f16:
case MVT::i16:
case MVT::bf16:
@@ -1080,8 +1074,8 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
Chain};
const MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
- const std::optional<unsigned> Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_i8, NVPTX::LD_i16, NVPTX::LD_i32, NVPTX::LD_i64);
+ const std::optional<unsigned> Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_i16, NVPTX::LD_i32, NVPTX::LD_i64);
if (!Opcode)
return false;
@@ -1166,17 +1160,15 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
default:
llvm_unreachable("Unexpected opcode");
case NVPTXISD::LoadV2:
- Opcode =
- pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i8_v2, NVPTX::LDV_i16_v2,
- NVPTX::LDV_i32_v2, NVPTX::LDV_i64_v2);
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i16_v2,
+ NVPTX::LDV_i32_v2, NVPTX::LDV_i64_v2);
break;
case NVPTXISD::LoadV4:
- Opcode =
- pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i8_v4, NVPTX::LDV_i16_v4,
- NVPTX::LDV_i32_v4, NVPTX::LDV_i64_v4);
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i16_v4,
+ NVPTX::LDV_i32_v4, NVPTX::LDV_i64_v4);
break;
case NVPTXISD::LoadV8:
- Opcode = pickOpcodeForVT(EltVT.SimpleTy, {/* no v8i8 */}, {/* no v8i16 */},
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, {/* no v8i16 */},
NVPTX::LDV_i32_v8, {/* no v8i64 */});
break;
}
@@ -1232,22 +1224,21 @@ bool NVPTXDAGToDAGISel::tryLDG(MemSDNode *LD) {
default:
llvm_unreachable("Unexpected opcode");
case ISD::LOAD:
- Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_i8,
- NVPTX::LD_GLOBAL_NC_i16, NVPTX::LD_GLOBAL_NC_i32,
- NVPTX::LD_GLOBAL_NC_i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_i16,
+ NVPTX::LD_GLOBAL_NC_i32, NVPTX::LD_GLOBAL_NC_i64);
break;
case NVPTXISD::LoadV2:
- Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_GLOBAL_NC_v2i8, NVPTX::LD_GLOBAL_NC_v2i16,
- NVPTX::LD_GLOBAL_NC_v2i32, NVPTX::LD_GLOBAL_NC_v2i64);
+ Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_v2i16,
+ NVPTX::LD_GLOBAL_NC_v2i32, NVPTX::LD_GLOBAL_NC_v2i64);
break;
case NVPTXISD::LoadV4:
- Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_GLOBAL_NC_v4i8, NVPTX::LD_GLOBAL_NC_v4i16,
- NVPTX::LD_GLOBAL_NC_v4i32, NVPTX::LD_GLOBAL_NC_v4i64);
+ Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_v4i16,
+ NVPTX::LD_GLOBAL_NC_v4i32, NVPTX::LD_GLOBAL_NC_v4i64);
break;
case NVPTXISD::LoadV8:
- Opcode = pickOpcodeForVT(TargetVT, {/* no v8i8 */}, {/* no v8i16 */},
+ Opcode = pickOpcodeForVT(TargetVT, {/* no v8i16 */},
NVPTX::LD_GLOBAL_NC_v8i32, {/* no v8i64 */});
break;
}
@@ -1278,8 +1269,9 @@ bool NVPTXDAGToDAGISel::tryLDU(SDNode *N) {
break;
}
- const MVT::SimpleValueType SelectVT =
- MVT::getIntegerVT(LD->getMemoryVT().getSizeInBits() / NumElts).SimpleTy;
+ SDLoc DL(N);
+ const unsigned FromTypeWidth = LD->getMemoryVT().getSizeInBits() / NumElts;
+ const MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
// If this is an LDU intrinsic, the address is the third operand. If its an
// LDU SD node (from custom vector handling), then its the second operand
@@ -1288,32 +1280,28 @@ bool NVPTXDAGToDAGISel::tryLDU(SDNode *N) {
SDValue Base, Offset;
SelectADDR(Addr, Base, Offset);
- SDValue Ops[] = {Base, Offset, LD->getChain()};
+ SDValue Ops[] = {getI32Imm(FromTypeWidth, DL), Base, Offset, LD->getChain()};
std::optional<unsigned> Opcode;
switch (N->getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
case ISD::INTRINSIC_W_CHAIN:
- Opcode =
- pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_i8, NVPTX::LDU_GLOBAL_i16,
- NVPTX::LDU_GLOBAL_i32, NVPTX::LDU_GLOBAL_i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_i16,
+ NVPTX::LDU_GLOBAL_i32, NVPTX::LDU_GLOBAL_i64);
break;
case NVPTXISD::LDUV2:
- Opcode = pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_v2i8,
- NVPTX::LDU_GLOBAL_v2i16, NVPTX::LDU_GLOBAL_v2i32,
- NVPTX::LDU_GLOBAL_v2i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_v2i16,
+ NVPTX::LDU_GLOBAL_v2i32, NVPTX::LDU_GLOBAL_v2i64);
break;
case NVPTXISD::LDUV4:
- Opcode = pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_v4i8,
- NVPTX::LDU_GLOBAL_v4i16, NVPTX::LDU_GLOBAL_v4i32,
- {/* no v4i64 */});
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_v4i16,
+ NVPTX::LDU_GLOBAL_v4i32, {/* no v4i64 */});
break;
}
if (!Opcode)
return false;
- SDLoc DL(N);
SDNode *NVPTXLDU = CurDAG->getMachineNode(*Opcode, DL, LD->getVTList(), Ops);
ReplaceNode(LD, NVPTXLDU);
@@ -1364,8 +1352,8 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
Chain};
const std::optional<unsigned> Opcode =
- pickOpcodeForVT(Value.getSimpleValueType().SimpleTy, NVPTX::ST_i8,
- NVPTX::ST_i16, NVPTX::ST_i32, NVPTX::ST_i64);
+ pickOpcodeForVT(Value.getSimpleValueType().SimpleTy, NVPTX::ST_i16,
+ NVPTX::ST_i32, NVPTX::ST_i64);
if (!Opcode)
return false;
@@ -1425,16 +1413,16 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
default:
return false;
case NVPTXISD::StoreV2:
- Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i8_v2, NVPTX::STV_i16_v2,
- NVPTX::STV_i32_v2, NVPTX::STV_i64_v2);
+ Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i16_v2, NVPTX::STV_i32_v2,
+ NVPTX::STV_i64_v2);
break;
case NVPTXISD::StoreV4:
- Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i8_v4, NVPTX::STV_i16_v4,
- NVPTX::STV_i32_v4, NVPTX::STV_i64_v4);
+ Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i16_v4, NVPTX::STV_i32_v4,
+ NVPTX::STV_i64_v4);
break;
case NVPTXISD::StoreV8:
- Opcode = pickOpcodeForVT(EltVT, {/* no v8i8 */}, {/* no v8i16 */},
- NVPTX::STV_i32_v8, {/* no v8i64 */});
+ Opcode = pickOpcodeForVT(EltVT, {/* no v8i16 */}, NVPTX::STV_i32_v8,
+ {/* no v8i64 */});
break;
}
@@ -1689,10 +1677,11 @@ bool NVPTXDAGToDAGISel::tryBF16ArithToFMA(SDNode *N) {
auto API = APF.bitcastToAPInt();
API = API.concat(API);
auto Const = CurDAG->getTargetConstant(API, DL, MVT::i32);
- return SDValue(CurDAG->getMachineNode(NVPTX::IMOV32i, DL, VT, Const), 0);
+ return SDValue(CurDAG->getMachineNode(NVPTX::MOV_B32_i, DL, VT, Const),
+ 0);
}
auto Const = CurDAG->getTargetConstantFP(APF, DL, VT);
- return SDValue(CurDAG->getMachineNode(NVPTX::BFMOV16i, DL, VT, Const), 0);
+ return SDValue(CurDAG->getMachineNode(NVPTX::MOV_BF16_i, DL, VT, Const), 0);
};
switch (N->getOpcode()) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index e504a8f..9e0f88e5 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -40,9 +40,6 @@ private:
class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
const NVPTXTargetMachine &TM;
- // If true, generate mul.wide from sext and mul
- bool doMulWide;
-
NVPTX::DivPrecisionLevel getDivF32Level(const SDNode *N) const;
bool usePrecSqrtF32(const SDNode *N) const;
bool useF32FTZ() const;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index f79b862..65d1be3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -843,7 +843,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM, ISD::VSELECT,
ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::LOAD,
- ISD::STORE});
+ ISD::STORE, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND});
// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -4917,7 +4917,6 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
return SDValue();
auto *LD = cast<MemSDNode>(N);
- EVT MemVT = LD->getMemoryVT();
SDLoc DL(LD);
// the new opcode after we double the number of operands
@@ -4958,9 +4957,9 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
// Create the new load
- SDValue NewLoad =
- DCI.DAG.getMemIntrinsicNode(Opcode, DL, DCI.DAG.getVTList(NewVTs),
- Operands, MemVT, LD->getMemOperand());
+ SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
+ Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
+ LD->getMemOperand());
// Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
// the outputs the same. These nodes will be optimized away in later
@@ -5002,7 +5001,6 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
return SDValue();
auto *ST = cast<MemSDNode>(N);
- EVT MemVT = ElementVT.getVectorElementType();
// The new opcode after we double the number of operands.
NVPTXISD::NodeType Opcode;
@@ -5011,11 +5009,9 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
// Any packed type is legal, so the legalizer will not have lowered
// ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
// it here.
- MemVT = ST->getMemoryVT();
Opcode = NVPTXISD::StoreV2;
break;
case NVPTXISD::StoreV2:
- MemVT = ST->getMemoryVT();
Opcode = NVPTXISD::StoreV4;
break;
case NVPTXISD::StoreV4:
@@ -5066,7 +5062,7 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
// Now we replace the store
return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
- MemVT, ST->getMemOperand());
+ ST->getMemoryVT(), ST->getMemOperand());
}
static SDValue PerformStoreCombine(SDNode *N,
@@ -5219,6 +5215,42 @@ static SDValue PerformREMCombine(SDNode *N,
return SDValue();
}
+// (sign_extend|zero_extend ((mul|shl) x, y)) -> (mul.wide x, y)
+static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOptLevel OptLevel) {
+ if (OptLevel == CodeGenOptLevel::None)
+ return SDValue();
+
+ SDValue Op = N->getOperand(0);
+ if (!Op.hasOneUse())
+ return SDValue();
+ EVT ToVT = N->getValueType(0);
+ EVT FromVT = Op.getValueType();
+ if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
+ (ToVT == MVT::i64 && FromVT == MVT::i32)))
+ return SDValue();
+ if (!(Op.getOpcode() == ISD::MUL ||
+ (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
+ return SDValue();
+
+ SDLoc DL(N);
+ unsigned ExtOpcode = N->getOpcode();
+ unsigned Opcode = 0;
+ if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
+ Opcode = NVPTXISD::MUL_WIDE_SIGNED;
+ else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
+ Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
+ else
+ return SDValue();
+ SDValue RHS = Op.getOperand(1);
+ if (Op.getOpcode() == ISD::SHL) {
+ const auto ShiftAmt = Op.getConstantOperandVal(1);
+ const auto MulVal = APInt(ToVT.getSizeInBits(), 1) << ShiftAmt;
+ RHS = DCI.DAG.getConstant(MulVal, DL, ToVT);
+ }
+ return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
+}
+
enum OperandSignedness {
Signed = 0,
Unsigned,
@@ -5825,6 +5857,9 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return combineADDRSPACECAST(N, DCI);
case ISD::AND:
return PerformANDCombine(N, DCI);
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ return combineMulWide(N, DCI, OptLevel);
case ISD::BUILD_VECTOR:
return PerformBUILD_VECTORCombine(N, DCI);
case ISD::EXTRACT_VECTOR_ELT:
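[Editor's aside, not part of the patch: a minimal standalone C++ sketch of the arithmetic combineMulWide performs when it matches (zext/sext (shl x, c)). The constant shift amount is rewritten as a multiplier of 1 << c, materialized at the wider result width, so the node can be re-emitted as a widening multiply. The bit width and shift amount below are illustrative assumptions, not values taken from a test.]

// Sketch only: mirrors the MulVal computation in combineMulWide above.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  const unsigned ToBits = 64;   // assumed i32 -> i64 widening
  const unsigned ShiftAmt = 5;  // assumed constant operand of the SHL node
  // Same expression as the combine: APInt(ToVT.getSizeInBits(), 1) << ShiftAmt.
  const llvm::APInt MulVal = llvm::APInt(ToBits, 1) << ShiftAmt;
  // (shl x, 5) widened to i64 is equivalent to a widening multiply by 32.
  std::printf("multiplier = %llu\n",
              static_cast<unsigned long long>(MulVal.getZExtValue()));
  return 0;
}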
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td b/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
index 86dcb4a..719be03 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
@@ -11,15 +11,9 @@
//
//===----------------------------------------------------------------------===//
-// Vector instruction type enum
-class VecInstTypeEnum<bits<4> val> {
- bits<4> Value=val;
-}
-def VecNOP : VecInstTypeEnum<0>;
-
// Generic NVPTX Format
-class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
: Instruction {
field bits<14> Inst;
@@ -30,7 +24,6 @@ class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
let Pattern = pattern;
// TSFlagFields
- bits<4> VecInstType = VecNOP.Value;
bit IsLoad = false;
bit IsStore = false;
@@ -45,7 +38,6 @@ class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// 2**(2-1) = 2.
bits<2> IsSuld = 0;
- let TSFlags{3...0} = VecInstType;
let TSFlags{4} = IsLoad;
let TSFlags{5} = IsStore;
let TSFlags{6} = IsTex;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index e218ef1..34fe467 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -35,23 +35,23 @@ void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
- if (RegInfo.getRegSizeInBits(*DestRC) != RegInfo.getRegSizeInBits(*SrcRC))
+ if (DestRC != SrcRC)
report_fatal_error("Copy one register into another with a different width");
unsigned Op;
- if (DestRC == &NVPTX::B1RegClass) {
- Op = NVPTX::IMOV1r;
- } else if (DestRC == &NVPTX::B16RegClass) {
- Op = NVPTX::MOV16r;
- } else if (DestRC == &NVPTX::B32RegClass) {
- Op = NVPTX::IMOV32r;
- } else if (DestRC == &NVPTX::B64RegClass) {
- Op = NVPTX::IMOV64r;
- } else if (DestRC == &NVPTX::B128RegClass) {
- Op = NVPTX::IMOV128r;
- } else {
+ if (DestRC == &NVPTX::B1RegClass)
+ Op = NVPTX::MOV_B1_r;
+ else if (DestRC == &NVPTX::B16RegClass)
+ Op = NVPTX::MOV_B16_r;
+ else if (DestRC == &NVPTX::B32RegClass)
+ Op = NVPTX::MOV_B32_r;
+ else if (DestRC == &NVPTX::B64RegClass)
+ Op = NVPTX::MOV_B64_r;
+ else if (DestRC == &NVPTX::B128RegClass)
+ Op = NVPTX::MOV_B128_r;
+ else
llvm_unreachable("Bad register copy");
- }
+
BuildMI(MBB, I, DL, get(Op), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 86d6f7c..d8047d3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -15,19 +15,8 @@ include "NVPTXInstrFormats.td"
let OperandType = "OPERAND_IMMEDIATE" in {
def f16imm : Operand<f16>;
def bf16imm : Operand<bf16>;
-
}
-// List of vector specific properties
-def isVecLD : VecInstTypeEnum<1>;
-def isVecST : VecInstTypeEnum<2>;
-def isVecBuild : VecInstTypeEnum<3>;
-def isVecShuffle : VecInstTypeEnum<4>;
-def isVecExtract : VecInstTypeEnum<5>;
-def isVecInsert : VecInstTypeEnum<6>;
-def isVecDest : VecInstTypeEnum<7>;
-def isVecOther : VecInstTypeEnum<15>;
-
//===----------------------------------------------------------------------===//
// NVPTX Operand Definitions.
//===----------------------------------------------------------------------===//
@@ -125,8 +114,6 @@ def doF32FTZ : Predicate<"useF32FTZ()">;
def doNoF32FTZ : Predicate<"!useF32FTZ()">;
def doRsqrtOpt : Predicate<"doRsqrtOpt()">;
-def doMulWide : Predicate<"doMulWide">;
-
def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
def hasDotInstructions : Predicate<"Subtarget->hasDotInstructions()">;
@@ -486,46 +473,28 @@ let hasSideEffects = false in {
// takes a CvtMode immediate that defines the conversion mode to use. It can
// be CvtNONE to omit a conversion mode.
multiclass CVT_FROM_ALL<string ToType, RegisterClass RC, list<Predicate> Preds = []> {
- def _s8 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s8">,
- Requires<Preds>;
- def _u8 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u8">,
- Requires<Preds>;
- def _s16 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s16">,
- Requires<Preds>;
- def _u16 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u16">,
- Requires<Preds>;
- def _s32 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B32:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s32">,
- Requires<Preds>;
- def _u32 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B32:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u32">,
- Requires<Preds>;
- def _s64 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B64:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s64">,
- Requires<Preds>;
- def _u64 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B64:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u64">,
- Requires<Preds>;
+ foreach sign = ["s", "u"] in {
+ def _ # sign # "8" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B16:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "8">,
+ Requires<Preds>;
+ def _ # sign # "16" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B16:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "16">,
+ Requires<Preds>;
+ def _ # sign # "32" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B32:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "32">,
+ Requires<Preds>;
+ def _ # sign # "64" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B64:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "64">,
+ Requires<Preds>;
+ }
def _f16 :
BasicFlagsNVPTXInst<(outs RC:$dst),
(ins B16:$src), (ins CvtMode:$mode),
@@ -556,14 +525,12 @@ let hasSideEffects = false in {
}
// Generate cvts from all types to all types.
- defm CVT_s8 : CVT_FROM_ALL<"s8", B16>;
- defm CVT_u8 : CVT_FROM_ALL<"u8", B16>;
- defm CVT_s16 : CVT_FROM_ALL<"s16", B16>;
- defm CVT_u16 : CVT_FROM_ALL<"u16", B16>;
- defm CVT_s32 : CVT_FROM_ALL<"s32", B32>;
- defm CVT_u32 : CVT_FROM_ALL<"u32", B32>;
- defm CVT_s64 : CVT_FROM_ALL<"s64", B64>;
- defm CVT_u64 : CVT_FROM_ALL<"u64", B64>;
+ foreach sign = ["s", "u"] in {
+ defm CVT_ # sign # "8" : CVT_FROM_ALL<sign # "8", B16>;
+ defm CVT_ # sign # "16" : CVT_FROM_ALL<sign # "16", B16>;
+ defm CVT_ # sign # "32" : CVT_FROM_ALL<sign # "32", B32>;
+ defm CVT_ # sign # "64" : CVT_FROM_ALL<sign # "64", B64>;
+ }
defm CVT_f16 : CVT_FROM_ALL<"f16", B16>;
defm CVT_bf16 : CVT_FROM_ALL<"bf16", B16, [hasPTX<78>, hasSM<90>]>;
defm CVT_f32 : CVT_FROM_ALL<"f32", B32>;
@@ -571,18 +538,12 @@ let hasSideEffects = false in {
// These cvts are different from those above: The source and dest registers
// are of the same type.
- def CVT_INREG_s16_s8 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src),
- "cvt.s16.s8">;
- def CVT_INREG_s32_s8 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "cvt.s32.s8">;
- def CVT_INREG_s32_s16 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "cvt.s32.s16">;
- def CVT_INREG_s64_s8 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s8">;
- def CVT_INREG_s64_s16 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s16">;
- def CVT_INREG_s64_s32 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s32">;
+ def CVT_INREG_s16_s8 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src), "cvt.s16.s8">;
+ def CVT_INREG_s32_s8 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src), "cvt.s32.s8">;
+ def CVT_INREG_s32_s16 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src), "cvt.s32.s16">;
+ def CVT_INREG_s64_s8 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s8">;
+ def CVT_INREG_s64_s16 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s16">;
+ def CVT_INREG_s64_s32 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s32">;
multiclass CVT_FROM_FLOAT_V2_SM80<string FromName, RegisterClass RC> {
def _f32 :
@@ -784,7 +745,7 @@ defm SUB : I3<"sub.s", sub, commutative = false>;
def ADD16x2 : I16x2<"add.s", add>;
-// in32 and int64 addition and subtraction with carry-out.
+// int32 and int64 addition and subtraction with carry-out.
defm ADDCC : ADD_SUB_INT_CARRY<"add.cc", addc, commutative = true>;
defm SUBCC : ADD_SUB_INT_CARRY<"sub.cc", subc, commutative = false>;
@@ -805,17 +766,17 @@ defm UDIV : I3<"div.u", udiv, commutative = false>;
defm SREM : I3<"rem.s", srem, commutative = false>;
defm UREM : I3<"rem.u", urem, commutative = false>;
-// Integer absolute value. NumBits should be one minus the bit width of RC.
-// This idiom implements the algorithm at
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<ValueType T, RegisterClass RC, string SizeName> {
- def : BasicNVPTXInst<(outs RC:$dst), (ins RC:$a),
- "abs" # SizeName,
- [(set T:$dst, (abs T:$a))]>;
+foreach t = [I16RT, I32RT, I64RT] in {
+ def ABS_S # t.Size :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a),
+ "abs.s" # t.Size,
+ [(set t.Ty:$dst, (abs t.Ty:$a))]>;
+
+ def NEG_S # t.Size :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
+ "neg.s" # t.Size,
+ [(set t.Ty:$dst, (ineg t.Ty:$src))]>;
}
-defm ABS_16 : ABS<i16, B16, ".s16">;
-defm ABS_32 : ABS<i32, B32, ".s32">;
-defm ABS_64 : ABS<i64, B64, ".s64">;
// Integer min/max.
defm SMAX : I3<"max.s", smax, commutative = true>;
@@ -832,170 +793,63 @@ def UMIN16x2 : I16x2<"min.u", umin>;
//
// Wide multiplication
//
-def MULWIDES64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, B32:$b), "mul.wide.s32">;
-def MULWIDES64Imm :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i32imm:$b), "mul.wide.s32">;
-def MULWIDES64Imm64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i64imm:$b), "mul.wide.s32">;
-
-def MULWIDEU64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, B32:$b), "mul.wide.u32">;
-def MULWIDEU64Imm :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i32imm:$b), "mul.wide.u32">;
-def MULWIDEU64Imm64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i64imm:$b), "mul.wide.u32">;
-
-def MULWIDES32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, B16:$b), "mul.wide.s16">;
-def MULWIDES32Imm :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i16imm:$b), "mul.wide.s16">;
-def MULWIDES32Imm32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i32imm:$b), "mul.wide.s16">;
-
-def MULWIDEU32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, B16:$b), "mul.wide.u16">;
-def MULWIDEU32Imm :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i16imm:$b), "mul.wide.u16">;
-def MULWIDEU32Imm32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i32imm:$b), "mul.wide.u16">;
-
-def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
-
-// Matchers for signed, unsigned mul.wide ISD nodes.
-let Predicates = [doMulWide] in {
- def : Pat<(i32 (mul_wide_signed i16:$a, i16:$b)), (MULWIDES32 $a, $b)>;
- def : Pat<(i32 (mul_wide_signed i16:$a, imm:$b)), (MULWIDES32Imm $a, imm:$b)>;
- def : Pat<(i32 (mul_wide_unsigned i16:$a, i16:$b)), (MULWIDEU32 $a, $b)>;
- def : Pat<(i32 (mul_wide_unsigned i16:$a, imm:$b)), (MULWIDEU32Imm $a, imm:$b)>;
-
- def : Pat<(i64 (mul_wide_signed i32:$a, i32:$b)), (MULWIDES64 $a, $b)>;
- def : Pat<(i64 (mul_wide_signed i32:$a, imm:$b)), (MULWIDES64Imm $a, imm:$b)>;
- def : Pat<(i64 (mul_wide_unsigned i32:$a, i32:$b)), (MULWIDEU64 $a, $b)>;
- def : Pat<(i64 (mul_wide_unsigned i32:$a, imm:$b)), (MULWIDEU64Imm $a, imm:$b)>;
-}
-
-// Predicates used for converting some patterns to mul.wide.
-def SInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(32);
-}]>;
-
-def UInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(32);
-}]>;
-
-def SInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(16);
-}]>;
-
-def UInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(16);
-}]>;
-
-def IntConst_0_30 : PatLeaf<(imm), [{
- // Check if 0 <= v < 31; only then will the result of (x << v) be an int32.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(31);
-}]>;
-
-def IntConst_0_14 : PatLeaf<(imm), [{
- // Check if 0 <= v < 15; only then will the result of (x << v) be an int16.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(15);
-}]>;
-
-def SHL2MUL32 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(32, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
-}]>;
-def SHL2MUL16 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(16, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
-}]>;
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 2>]>;
+def smul_wide : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide, [SDNPCommutative]>;
+def umul_wide : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide, [SDNPCommutative]>;
-// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
-let Predicates = [doMulWide] in {
- def : Pat<(shl (sext i32:$a), (i32 IntConst_0_30:$b)),
- (MULWIDES64Imm $a, (SHL2MUL32 $b))>;
- def : Pat<(shl (zext i32:$a), (i32 IntConst_0_30:$b)),
- (MULWIDEU64Imm $a, (SHL2MUL32 $b))>;
- def : Pat<(shl (sext i16:$a), (i16 IntConst_0_14:$b)),
- (MULWIDES32Imm $a, (SHL2MUL16 $b))>;
- def : Pat<(shl (zext i16:$a), (i16 IntConst_0_14:$b)),
- (MULWIDEU32Imm $a, (SHL2MUL16 $b))>;
-
- // Convert "sign/zero-extend then multiply" to mul.wide.
- def : Pat<(mul (sext i32:$a), (sext i32:$b)),
- (MULWIDES64 $a, $b)>;
- def : Pat<(mul (sext i32:$a), (i64 SInt32Const:$b)),
- (MULWIDES64Imm64 $a, (i64 SInt32Const:$b))>;
-
- def : Pat<(mul (zext i32:$a), (zext i32:$b)),
- (MULWIDEU64 $a, $b)>;
- def : Pat<(mul (zext i32:$a), (i64 UInt32Const:$b)),
- (MULWIDEU64Imm64 $a, (i64 UInt32Const:$b))>;
-
- def : Pat<(mul (sext i16:$a), (sext i16:$b)),
- (MULWIDES32 $a, $b)>;
- def : Pat<(mul (sext i16:$a), (i32 SInt16Const:$b)),
- (MULWIDES32Imm32 $a, (i32 SInt16Const:$b))>;
-
- def : Pat<(mul (zext i16:$a), (zext i16:$b)),
- (MULWIDEU32 $a, $b)>;
- def : Pat<(mul (zext i16:$a), (i32 UInt16Const:$b)),
- (MULWIDEU32Imm32 $a, (i32 UInt16Const:$b))>;
+multiclass MULWIDEInst<string suffix, SDPatternOperator op, RegTyInfo big_t, RegTyInfo small_t> {
+ def suffix # _rr :
+ BasicNVPTXInst<(outs big_t.RC:$dst), (ins small_t.RC:$a, small_t.RC:$b),
+ "mul.wide." # suffix,
+ [(set big_t.Ty:$dst, (op small_t.Ty:$a, small_t.Ty:$b))]>;
+ def suffix # _ri :
+ BasicNVPTXInst<(outs big_t.RC:$dst), (ins small_t.RC:$a, small_t.Imm:$b),
+ "mul.wide." # suffix,
+ [(set big_t.Ty:$dst, (op small_t.Ty:$a, imm:$b))]>;
}
+defm MUL_WIDE : MULWIDEInst<"s32", smul_wide, I64RT, I32RT>;
+defm MUL_WIDE : MULWIDEInst<"u32", umul_wide, I64RT, I32RT>;
+defm MUL_WIDE : MULWIDEInst<"s16", smul_wide, I32RT, I16RT>;
+defm MUL_WIDE : MULWIDEInst<"u16", umul_wide, I32RT, I16RT>;
+
//
// Integer multiply-add
//
-def mul_oneuse : OneUse2<mul>;
-
-multiclass MAD<string Ptx, ValueType VT, NVPTXRegClass Reg, Operand Imm> {
+multiclass MADInst<string suffix, SDPatternOperator op, RegTyInfo big_t, RegTyInfo small_t> {
def rrr:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Reg:$b, Reg:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, VT:$b), VT:$c))]>;
-
- def rir:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Imm:$b, Reg:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, imm:$b), VT:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.RC:$b, big_t.RC:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, small_t.Ty:$b), big_t.Ty:$c))]>;
def rri:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Reg:$b, Imm:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, VT:$b), imm:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.RC:$b, big_t.Imm:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, small_t.Ty:$b), imm:$c))]>;
+ def rir:
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.Imm:$b, big_t.RC:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, imm:$b), big_t.Ty:$c))]>;
def rii:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Imm:$b, Imm:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, imm:$b), imm:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.Imm:$b, big_t.Imm:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, imm:$b), imm:$c))]>;
}
let Predicates = [hasOptEnabled] in {
-defm MAD16 : MAD<"mad.lo.s16", i16, B16, i16imm>;
-defm MAD32 : MAD<"mad.lo.s32", i32, B32, i32imm>;
-defm MAD64 : MAD<"mad.lo.s64", i64, B64, i64imm>;
-}
+ defm MAD_LO_S16 : MADInst<"lo.s16", mul, I16RT, I16RT>;
+ defm MAD_LO_S32 : MADInst<"lo.s32", mul, I32RT, I32RT>;
+ defm MAD_LO_S64 : MADInst<"lo.s64", mul, I64RT, I64RT>;
-foreach t = [I16RT, I32RT, I64RT] in {
- def NEG_S # t.Size :
- BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
- "neg.s" # t.Size,
- [(set t.Ty:$dst, (ineg t.Ty:$src))]>;
+ defm MAD_WIDE_U16 : MADInst<"wide.u16", umul_wide, I32RT, I16RT>;
+ defm MAD_WIDE_S16 : MADInst<"wide.s16", smul_wide, I32RT, I16RT>;
+ defm MAD_WIDE_U32 : MADInst<"wide.u32", umul_wide, I64RT, I32RT>;
+ defm MAD_WIDE_S32 : MADInst<"wide.s32", smul_wide, I64RT, I32RT>;
}
//-----------------------------------
@@ -1106,8 +960,7 @@ def fdiv_approx : PatFrag<(ops node:$a, node:$b),
def FRCP32_approx_r :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$b), (ins FTZFlag:$ftz),
"rcp.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32imm_1, f32:$b))]>;
@@ -1116,14 +969,12 @@ def FRCP32_approx_r :
//
def FDIV32_approx_rr :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32:$a, f32:$b))]>;
def FDIV32_approx_ri :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32:$a, fpimm:$b))]>;
//
@@ -1146,14 +997,12 @@ def : Pat<(fdiv_full f32imm_1, f32:$b),
//
def FDIV32rr :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.full$ftz.f32",
[(set f32:$dst, (fdiv_full f32:$a, f32:$b))]>;
def FDIV32ri :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.full$ftz.f32",
[(set f32:$dst, (fdiv_full f32:$a, fpimm:$b))]>;
//
@@ -1167,8 +1016,7 @@ def fdiv_ftz : PatFrag<(ops node:$a, node:$b),
def FRCP32r_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$b), (ins FTZFlag:$ftz),
"rcp.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32imm_1, f32:$b))]>;
//
@@ -1176,14 +1024,12 @@ def FRCP32r_prec :
//
def FDIV32rr_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32:$a, f32:$b))]>;
def FDIV32ri_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32:$a, fpimm:$b))]>;
@@ -1262,10 +1108,8 @@ def TANH_APPROX_f32 :
// Template for three-arg bitwise operations. Takes three args, Creates .b16,
// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
multiclass BITWISE<string OpcStr, SDNode OpNode> {
- defm b1 : I3Inst<OpcStr # ".pred", OpNode, I1RT, commutative = true>;
- defm b16 : I3Inst<OpcStr # ".b16", OpNode, I16RT, commutative = true>;
- defm b32 : I3Inst<OpcStr # ".b32", OpNode, I32RT, commutative = true>;
- defm b64 : I3Inst<OpcStr # ".b64", OpNode, I64RT, commutative = true>;
+ foreach t = [I1RT, I16RT, I32RT, I64RT] in
+ defm _ # t.PtxType : I3Inst<OpcStr # "." # t.PtxType, OpNode, t, commutative = true>;
}
defm OR : BITWISE<"or", or>;
@@ -1273,48 +1117,40 @@ defm AND : BITWISE<"and", and>;
defm XOR : BITWISE<"xor", xor>;
// PTX does not support mul on predicates, convert to and instructions
-def : Pat<(mul i1:$a, i1:$b), (ANDb1rr $a, $b)>;
-def : Pat<(mul i1:$a, imm:$b), (ANDb1ri $a, imm:$b)>;
+def : Pat<(mul i1:$a, i1:$b), (AND_predrr $a, $b)>;
+def : Pat<(mul i1:$a, imm:$b), (AND_predri $a, imm:$b)>;
foreach op = [add, sub] in {
- def : Pat<(op i1:$a, i1:$b), (XORb1rr $a, $b)>;
- def : Pat<(op i1:$a, imm:$b), (XORb1ri $a, imm:$b)>;
+ def : Pat<(op i1:$a, i1:$b), (XOR_predrr $a, $b)>;
+ def : Pat<(op i1:$a, imm:$b), (XOR_predri $a, imm:$b)>;
}
// These transformations were once reliably performed by instcombine, but thanks
// to poison semantics they are no longer safe for LLVM IR, perform them here
// instead.
-def : Pat<(select i1:$a, i1:$b, 0), (ANDb1rr $a, $b)>;
-def : Pat<(select i1:$a, 1, i1:$b), (ORb1rr $a, $b)>;
+def : Pat<(select i1:$a, i1:$b, 0), (AND_predrr $a, $b)>;
+def : Pat<(select i1:$a, 1, i1:$b), (OR_predrr $a, $b)>;
// Lower logical v2i16/v4i8 ops as bitwise ops on b32.
foreach vt = [v2i16, v4i8] in {
- def : Pat<(or vt:$a, vt:$b), (ORb32rr $a, $b)>;
- def : Pat<(xor vt:$a, vt:$b), (XORb32rr $a, $b)>;
- def : Pat<(and vt:$a, vt:$b), (ANDb32rr $a, $b)>;
+ def : Pat<(or vt:$a, vt:$b), (OR_b32rr $a, $b)>;
+ def : Pat<(xor vt:$a, vt:$b), (XOR_b32rr $a, $b)>;
+ def : Pat<(and vt:$a, vt:$b), (AND_b32rr $a, $b)>;
// The constants get legalized into a bitcast from i32, so that's what we need
// to match here.
def: Pat<(or vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (ORb32ri $a, imm:$b)>;
+ (OR_b32ri $a, imm:$b)>;
def: Pat<(xor vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (XORb32ri $a, imm:$b)>;
+ (XOR_b32ri $a, imm:$b)>;
def: Pat<(and vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (ANDb32ri $a, imm:$b)>;
-}
-
-def NOT1 : BasicNVPTXInst<(outs B1:$dst), (ins B1:$src),
- "not.pred",
- [(set i1:$dst, (not i1:$src))]>;
-def NOT16 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src),
- "not.b16",
- [(set i16:$dst, (not i16:$src))]>;
-def NOT32 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "not.b32",
- [(set i32:$dst, (not i32:$src))]>;
-def NOT64 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "not.b64",
- [(set i64:$dst, (not i64:$src))]>;
+ (AND_b32ri $a, imm:$b)>;
+}
+
+foreach t = [I1RT, I16RT, I32RT, I64RT] in
+ def NOT_ # t.PtxType : BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
+ "not." # t.PtxType,
+ [(set t.Ty:$dst, (not t.Ty:$src))]>;
// Template for left/right shifts. Takes three operands,
// [dest (reg), src (reg), shift (reg or imm)].
@@ -1322,34 +1158,22 @@ def NOT64 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
//
// This template also defines a 32-bit shift (imm, imm) instruction.
multiclass SHIFT<string OpcStr, SDNode OpNode> {
- def i64rr :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a, B32:$b),
- OpcStr # "64",
- [(set i64:$dst, (OpNode i64:$a, i32:$b))]>;
- def i64ri :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a, i32imm:$b),
- OpcStr # "64",
- [(set i64:$dst, (OpNode i64:$a, (i32 imm:$b)))]>;
- def i32rr :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a, B32:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
- def i32ri :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a, i32imm:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode i32:$a, (i32 imm:$b)))]>;
- def i32ii :
- BasicNVPTXInst<(outs B32:$dst), (ins i32imm:$a, i32imm:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
- def i16rr :
- BasicNVPTXInst<(outs B16:$dst), (ins B16:$a, B32:$b),
- OpcStr # "16",
- [(set i16:$dst, (OpNode i16:$a, i32:$b))]>;
- def i16ri :
- BasicNVPTXInst<(outs B16:$dst), (ins B16:$a, i32imm:$b),
- OpcStr # "16",
- [(set i16:$dst, (OpNode i16:$a, (i32 imm:$b)))]>;
+ let hasSideEffects = false in {
+ foreach t = [I64RT, I32RT, I16RT] in {
+ def t.Size # _rr :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, B32:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode t.Ty:$a, i32:$b))]>;
+ def t.Size # _ri :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, i32imm:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode t.Ty:$a, (i32 imm:$b)))]>;
+ def t.Size # _ii :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, i32imm:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode (t.Ty imm:$a), (i32 imm:$b)))]>;
+ }
+ }
}
defm SHL : SHIFT<"shl.b", shl>;
@@ -1357,14 +1181,11 @@ defm SRA : SHIFT<"shr.s", sra>;
defm SRL : SHIFT<"shr.u", srl>;
// Bit-reverse
-def BREV32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a),
- "brev.b32",
- [(set i32:$dst, (bitreverse i32:$a))]>;
-def BREV64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a),
- "brev.b64",
- [(set i64:$dst, (bitreverse i64:$a))]>;
+foreach t = [I64RT, I32RT] in
+ def BREV_ # t.PtxType :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a),
+ "brev." # t.PtxType,
+ [(set t.Ty:$dst, (bitreverse t.Ty:$a))]>;
//
@@ -1516,20 +1337,19 @@ def : Pat<(i16 (sext_inreg (trunc (prmt i32:$s, 0, byte_extract_prmt:$sel, PrmtN
// Byte extraction via shift/trunc/sext
-def : Pat<(i16 (sext_inreg (trunc i32:$s), i8)),
- (CVT_s8_s32 $s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl i32:$s, (i32 imm:$o))), i8)),
+def : Pat<(i16 (sext_inreg (trunc i32:$s), i8)), (CVT_s8_s32 $s, CvtNONE)>;
+def : Pat<(i16 (sext_inreg (trunc i64:$s), i8)), (CVT_s8_s64 $s, CvtNONE)>;
+
+def : Pat<(sext_inreg (srl i32:$s, (i32 imm:$o)), i8), (BFE_S32rii $s, imm:$o, 8)>;
+def : Pat<(sext_inreg (srl i64:$s, (i32 imm:$o)), i8), (BFE_S64rii $s, imm:$o, 8)>;
+
+def : Pat<(i16 (sext_inreg (trunc (srl i32:$s, (i32 imm:$o))), i8)),
(CVT_s8_s32 (BFE_S32rii $s, imm:$o, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl i32:$s, (i32 imm:$o)), i8),
- (BFE_S32rii $s, imm:$o, 8)>;
+def : Pat<(i16 (sext_inreg (trunc (srl i64:$s, (i32 imm:$o))), i8)),
+ (CVT_s8_s64 (BFE_S64rii $s, imm:$o, 8), CvtNONE)>;
+
def : Pat<(i16 (sra (i16 (trunc i32:$s)), (i32 8))),
(CVT_s8_s32 (BFE_S32rii $s, 8, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl i64:$s, (i32 imm:$o)), i8),
- (BFE_S64rii $s, imm:$o, 8)>;
-def : Pat<(i16 (sext_inreg (trunc i64:$s), i8)),
- (CVT_s8_s64 $s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl i64:$s, (i32 imm:$o))), i8)),
- (CVT_s8_s64 (BFE_S64rii $s, imm:$o, 8), CvtNONE)>;
//-----------------------------------
// Comparison instructions (setp, set)
@@ -1619,10 +1439,7 @@ def SETP_bf16x2rr :
def addr : ComplexPattern<pAny, 2, "SelectADDR">;
-def ADDR_base : Operand<pAny> {
- let PrintMethod = "printOperand";
-}
-
+def ADDR_base : Operand<pAny>;
def ADDR : Operand<pAny> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops ADDR_base, i32imm);
@@ -1636,10 +1453,6 @@ def MmaCode : Operand<i32> {
let PrintMethod = "printMmaCode";
}
-def Offseti32imm : Operand<i32> {
- let PrintMethod = "printOffseti32imm";
-}
-
// Get pointer to local stack.
let hasSideEffects = false in {
def MOV_DEPOT_ADDR : NVPTXInst<(outs B32:$d), (ins i32imm:$num),
@@ -1651,33 +1464,31 @@ let hasSideEffects = false in {
// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
let hasSideEffects = false, isAsCheapAsAMove = true in {
- // Class for register-to-register moves
- class MOVr<RegisterClass RC, string OpStr> :
- BasicNVPTXInst<(outs RC:$dst), (ins RC:$src),
- "mov." # OpStr>;
-
- // Class for immediate-to-register moves
- class MOVi<RegisterClass RC, string OpStr, ValueType VT, Operand IMMType, SDNode ImmNode> :
- BasicNVPTXInst<(outs RC:$dst), (ins IMMType:$src),
- "mov." # OpStr,
- [(set VT:$dst, ImmNode:$src)]>;
-}
+ let isMoveReg = true in
+ class MOVr<RegisterClass RC, string OpStr> :
+ BasicNVPTXInst<(outs RC:$dst), (ins RC:$src), "mov." # OpStr>;
-def IMOV1r : MOVr<B1, "pred">;
-def MOV16r : MOVr<B16, "b16">;
-def IMOV32r : MOVr<B32, "b32">;
-def IMOV64r : MOVr<B64, "b64">;
-def IMOV128r : MOVr<B128, "b128">;
+ let isMoveImm = true in
+ class MOVi<RegTyInfo t, string suffix> :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.Imm:$src),
+ "mov." # suffix,
+ [(set t.Ty:$dst, t.ImmNode:$src)]>;
+}
+def MOV_B1_r : MOVr<B1, "pred">;
+def MOV_B16_r : MOVr<B16, "b16">;
+def MOV_B32_r : MOVr<B32, "b32">;
+def MOV_B64_r : MOVr<B64, "b64">;
+def MOV_B128_r : MOVr<B128, "b128">;
-def IMOV1i : MOVi<B1, "pred", i1, i1imm, imm>;
-def IMOV16i : MOVi<B16, "b16", i16, i16imm, imm>;
-def IMOV32i : MOVi<B32, "b32", i32, i32imm, imm>;
-def IMOV64i : MOVi<B64, "b64", i64, i64imm, imm>;
-def FMOV16i : MOVi<B16, "b16", f16, f16imm, fpimm>;
-def BFMOV16i : MOVi<B16, "b16", bf16, bf16imm, fpimm>;
-def FMOV32i : MOVi<B32, "b32", f32, f32imm, fpimm>;
-def FMOV64i : MOVi<B64, "b64", f64, f64imm, fpimm>;
+def MOV_B1_i : MOVi<I1RT, "pred">;
+def MOV_B16_i : MOVi<I16RT, "b16">;
+def MOV_B32_i : MOVi<I32RT, "b32">;
+def MOV_B64_i : MOVi<I64RT, "b64">;
+def MOV_F16_i : MOVi<F16RT, "b16">;
+def MOV_BF16_i : MOVi<BF16RT, "b16">;
+def MOV_F32_i : MOVi<F32RT, "b32">;
+def MOV_F64_i : MOVi<F64RT, "b64">;
def to_tglobaladdr : SDNodeXForm<globaladdr, [{
@@ -1695,11 +1506,11 @@ def to_tframeindex : SDNodeXForm<frameindex, [{
return CurDAG->getTargetFrameIndex(N->getIndex(), N->getValueType(0));
}]>;
-def : Pat<(i32 globaladdr:$dst), (IMOV32i (to_tglobaladdr $dst))>;
-def : Pat<(i64 globaladdr:$dst), (IMOV64i (to_tglobaladdr $dst))>;
+def : Pat<(i32 globaladdr:$dst), (MOV_B32_i (to_tglobaladdr $dst))>;
+def : Pat<(i64 globaladdr:$dst), (MOV_B64_i (to_tglobaladdr $dst))>;
-def : Pat<(i32 externalsym:$dst), (IMOV32i (to_texternsym $dst))>;
-def : Pat<(i64 externalsym:$dst), (IMOV64i (to_texternsym $dst))>;
+def : Pat<(i32 externalsym:$dst), (MOV_B32_i (to_texternsym $dst))>;
+def : Pat<(i64 externalsym:$dst), (MOV_B64_i (to_texternsym $dst))>;
//---- Copy Frame Index ----
def LEA_ADDRi : NVPTXInst<(outs B32:$dst), (ins ADDR:$addr),
@@ -1713,45 +1524,34 @@ def : Pat<(i64 frameindex:$fi), (LEA_ADDRi64 (to_tframeindex $fi), 0)>;
//-----------------------------------
// Comparison and Selection
//-----------------------------------
+// TODO: These patterns seem very specific and brittle. We should try to find
+// a more general solution.
def cond_signed : PatLeaf<(cond), [{
return isSignedIntSetCC(N->get());
}]>;
-def cond_not_signed : PatLeaf<(cond), [{
- return !isSignedIntSetCC(N->get());
-}]>;
+// A 16-bit signed comparison of sign-extended byte extracts can be converted
+// to a 32-bit comparison if we change the PRMT to sign-extend the extracted
+// bytes.
+def : Pat<(setcc (i16 (sext_inreg (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE)), i8)),
+ (i16 (sext_inreg (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE)), i8)),
+ cond_signed:$cc),
+ (SETP_i32rr (PRMT_B32rii i32:$a, 0, (to_sign_extend_selector $sel_a), PrmtNONE),
+ (PRMT_B32rii i32:$b, 0, (to_sign_extend_selector $sel_b), PrmtNONE),
+ (cond2cc $cc))>;
+
+// A 16-bit comparison of truncated byte extracts can be converted to a 32-bit
+// comparison because we know that the truncate is just truncating off zeros
+// and that the most-significant byte is also zero, so the meaning of signed
+// and unsigned comparisons will not be changed.
+def : Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
+ (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
+ cond:$cc),
+ (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+ (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
+ (cond2cc $cc))>;
-// comparisons of i8 extracted with PRMT as i32
-// It's faster to do comparison directly on i32 extracted by PRMT,
-// instead of the long conversion and sign extending.
-def: Pat<(setcc (i16 (sext_inreg (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))), i8)),
- (i16 (sext_inreg (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))), i8)),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (sext_inreg (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE)), i8)),
- (i16 (sext_inreg (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE)), i8)),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
- (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
- (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
- cond_not_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
def SDTDeclareArrayParam :
SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>]>;
@@ -1830,6 +1630,18 @@ def : Pat<(declare_array_param externalsym:$a, imm:$align, imm:$size),
def : Pat<(declare_scalar_param externalsym:$a, imm:$size),
(DECLARE_PARAM_scalar (to_texternsym $a), imm:$size)>;
+// Call prototype wrapper; this is a dummy instruction that just prints its
+// operand, which is a string defining the prototype.
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> { let PrintMethod = "printProtoIdent"; }
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
foreach t = [I32RT, I64RT] in {
defvar inst_name = "MOV" # t.Size # "_PARAM";
def inst_name : BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src), "mov.b" # t.Size>;
@@ -1849,6 +1661,32 @@ defm ProxyRegB16 : ProxyRegInst<"b16", B16>;
defm ProxyRegB32 : ProxyRegInst<"b32", B32>;
defm ProxyRegB64 : ProxyRegInst<"b64", B64>;
+
+// Callseq start and end
+
+// Note: these nodes are marked as SDNPMayStore and SDNPMayLoad because
+// they define the scope in which the declared params may be used. Therefore
+// we add these flags to ensure ld.param and st.param are not sunk or hoisted
+// out of that scope.
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START",
+ SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>,
+ [SDNPHasChain, SDNPOutGlue,
+ SDNPSideEffect, SDNPMayStore, SDNPMayLoad]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END",
+ SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect, SDNPMayStore, SDNPMayLoad]>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\{ // callseq $amt1, $amt2",
+ [(callseq_start timm:$amt1, timm:$amt2)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
//
// Load / Store Handling
//
@@ -1861,7 +1699,6 @@ class LD<NVPTXRegClass regclass>
"\t$dst, [$addr];", []>;
let mayLoad=1, hasSideEffects=0 in {
- def LD_i8 : LD<B16>;
def LD_i16 : LD<B16>;
def LD_i32 : LD<B32>;
def LD_i64 : LD<B64>;
@@ -1877,7 +1714,6 @@ class ST<DAGOperand O>
" \t[$addr], $src;", []>;
let mayStore=1, hasSideEffects=0 in {
- def ST_i8 : ST<RI16>;
def ST_i16 : ST<RI16>;
def ST_i32 : ST<RI32>;
def ST_i64 : ST<RI64>;
@@ -1910,7 +1746,6 @@ multiclass LD_VEC<NVPTXRegClass regclass, bit support_v8 = false> {
"[$addr];", []>;
}
let mayLoad=1, hasSideEffects=0 in {
- defm LDV_i8 : LD_VEC<B16>;
defm LDV_i16 : LD_VEC<B16>;
defm LDV_i32 : LD_VEC<B32, support_v8 = true>;
defm LDV_i64 : LD_VEC<B64>;
@@ -1944,7 +1779,6 @@ multiclass ST_VEC<DAGOperand O, bit support_v8 = false> {
}
let mayStore=1, hasSideEffects=0 in {
- defm STV_i8 : ST_VEC<RI16>;
defm STV_i16 : ST_VEC<RI16>;
defm STV_i32 : ST_VEC<RI32, support_v8 = true>;
defm STV_i64 : ST_VEC<RI64>;
@@ -2114,14 +1948,14 @@ def : Pat<(i64 (anyext i32:$a)), (CVT_u64_u32 $a, CvtNONE)>;
// truncate i64
def : Pat<(i32 (trunc i64:$a)), (CVT_u32_u64 $a, CvtNONE)>;
def : Pat<(i16 (trunc i64:$a)), (CVT_u16_u64 $a, CvtNONE)>;
-def : Pat<(i1 (trunc i64:$a)), (SETP_i64ri (ANDb64ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i64:$a)), (SETP_i64ri (AND_b64ri $a, 1), 0, CmpNE)>;
// truncate i32
def : Pat<(i16 (trunc i32:$a)), (CVT_u16_u32 $a, CvtNONE)>;
-def : Pat<(i1 (trunc i32:$a)), (SETP_i32ri (ANDb32ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i32:$a)), (SETP_i32ri (AND_b32ri $a, 1), 0, CmpNE)>;
// truncate i16
-def : Pat<(i1 (trunc i16:$a)), (SETP_i16ri (ANDb16ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i16:$a)), (SETP_i16ri (AND_b16ri $a, 1), 0, CmpNE)>;
// sext_inreg
def : Pat<(sext_inreg i16:$a, i8), (CVT_INREG_s16_s8 $a)>;
@@ -2365,52 +2199,20 @@ defm : CVT_ROUND<frint, CvtRNI, CvtRNI_FTZ>;
//-----------------------------------
let isTerminator=1 in {
- let isReturn=1, isBarrier=1 in
+ let isReturn=1, isBarrier=1 in
def Return : BasicNVPTXInst<(outs), (ins), "ret", [(retglue)]>;
- let isBranch=1 in
- def CBranch : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
+ let isBranch=1 in {
+ def CBranch : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
"@$a bra \t$target;",
[(brcond i1:$a, bb:$target)]>;
- let isBranch=1 in
- def CBranchOther : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
- "@!$a bra \t$target;", []>;
- let isBranch=1, isBarrier=1 in
+ let isBarrier=1 in
def GOTO : BasicNVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni", [(br bb:$target)]>;
+ "bra.uni", [(br bb:$target)]>;
+ }
}
-def : Pat<(brcond i32:$a, bb:$target),
- (CBranch (SETP_i32ri $a, 0, CmpNE), bb:$target)>;
-
-// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
-// conditional branch if the target block is the next block so that the code
-// can fall through to the target block. The inversion is done by 'xor
-// condition, 1', which will be translated to (setne condition, -1). Since ptx
-// supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne i1:$a, -1)), bb:$target),
- (CBranchOther $a, bb:$target)>;
-
-// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
- SDTCisVT<1, i32>]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
-
-def Callseq_Start :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\{ // callseq $amt1, $amt2",
- [(callseq_start timm:$amt1, timm:$amt2)]>;
-def Callseq_End :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\} // callseq $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
// trap instruction
def trapinst : BasicNVPTXInst<(outs), (ins), "trap", [(trap)]>, Requires<[noPTXASUnreachableBug]>;
@@ -2420,18 +2222,6 @@ def trapexitinst : NVPTXInst<(outs), (ins), "trap; exit;", [(trap)]>, Requires<[
// brkpt instruction
def debugtrapinst : BasicNVPTXInst<(outs), (ins), "brkpt", [(debugtrap)]>;
-// Call prototype wrapper
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype :
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def ProtoIdent : Operand<i32> {
- let PrintMethod = "printProtoIdent";
-}
-def CALL_PROTOTYPE :
- NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
def SDTDynAllocaOp :
SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisInt<1>, SDTCisVT<2, i32>]>;
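[Editor's aside, not part of the patch: the MAD_WIDE_* definitions introduced above pair a widening multiply with an add, e.g. mad.wide.s32 produces a 64-bit result from two 32-bit factors plus a 64-bit addend. A hedged C++ sketch of the equivalent scalar computation, with made-up operand values, follows.]

// Sketch only: what mad.wide.s32 d, a, b, c computes, per the patterns above.
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t a = 70000, b = 70000;  // 32-bit factors (illustrative values)
  const int64_t c = 1;                 // 64-bit addend (illustrative value)
  // Widen before multiplying so the product does not wrap at 32 bits.
  const int64_t d = static_cast<int64_t>(a) * static_cast<int64_t>(b) + c;
  std::printf("%lld\n", static_cast<long long>(d));
  return 0;
}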
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 0a00220..d337192 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -243,63 +243,82 @@ foreach sync = [false, true] in {
}
// vote.{all,any,uni,ballot}
-multiclass VOTE<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
- def : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred),
- "vote." # mode,
- [(set regclass:$dest, (IntOp i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
-}
+let Predicates = [hasPTX<60>, hasSM<30>] in {
+ multiclass VOTE<string mode, RegTyInfo t, Intrinsic op> {
+ def : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred),
+ "vote." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op i1:$pred))]>;
+ }
-defm VOTE_ALL : VOTE<B1, "all.pred", int_nvvm_vote_all>;
-defm VOTE_ANY : VOTE<B1, "any.pred", int_nvvm_vote_any>;
-defm VOTE_UNI : VOTE<B1, "uni.pred", int_nvvm_vote_uni>;
-defm VOTE_BALLOT : VOTE<B32, "ballot.b32", int_nvvm_vote_ballot>;
+ defm VOTE_ALL : VOTE<"all", I1RT, int_nvvm_vote_all>;
+ defm VOTE_ANY : VOTE<"any", I1RT, int_nvvm_vote_any>;
+ defm VOTE_UNI : VOTE<"uni", I1RT, int_nvvm_vote_uni>;
+ defm VOTE_BALLOT : VOTE<"ballot", I32RT, int_nvvm_vote_ballot>;
+
+ // vote.sync.{all,any,uni,ballot}
+ multiclass VOTE_SYNC<string mode, RegTyInfo t, Intrinsic op> {
+ def i : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred, i32imm:$mask),
+ "vote.sync." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op imm:$mask, i1:$pred))]>;
+ def r : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred, B32:$mask),
+ "vote.sync." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op i32:$mask, i1:$pred))]>;
+ }
-// vote.sync.{all,any,uni,ballot}
-multiclass VOTE_SYNC<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
- def i : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred, i32imm:$mask),
- "vote.sync." # mode,
- [(set regclass:$dest, (IntOp imm:$mask, i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
- def r : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred, B32:$mask),
- "vote.sync." # mode,
- [(set regclass:$dest, (IntOp i32:$mask, i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
+ defm VOTE_SYNC_ALL : VOTE_SYNC<"all", I1RT, int_nvvm_vote_all_sync>;
+ defm VOTE_SYNC_ANY : VOTE_SYNC<"any", I1RT, int_nvvm_vote_any_sync>;
+ defm VOTE_SYNC_UNI : VOTE_SYNC<"uni", I1RT, int_nvvm_vote_uni_sync>;
+ defm VOTE_SYNC_BALLOT : VOTE_SYNC<"ballot", I32RT, int_nvvm_vote_ballot_sync>;
}
-
-defm VOTE_SYNC_ALL : VOTE_SYNC<B1, "all.pred", int_nvvm_vote_all_sync>;
-defm VOTE_SYNC_ANY : VOTE_SYNC<B1, "any.pred", int_nvvm_vote_any_sync>;
-defm VOTE_SYNC_UNI : VOTE_SYNC<B1, "uni.pred", int_nvvm_vote_uni_sync>;
-defm VOTE_SYNC_BALLOT : VOTE_SYNC<B32, "ballot.b32", int_nvvm_vote_ballot_sync>;
-
// elect.sync
+let Predicates = [hasPTX<80>, hasSM<90>] in {
def INT_ELECT_SYNC_I : BasicNVPTXInst<(outs B32:$dest, B1:$pred), (ins i32imm:$mask),
"elect.sync",
- [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync imm:$mask))]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync imm:$mask))]>;
def INT_ELECT_SYNC_R : BasicNVPTXInst<(outs B32:$dest, B1:$pred), (ins B32:$mask),
"elect.sync",
- [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync i32:$mask))]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync i32:$mask))]>;
+}
+
+let Predicates = [hasPTX<60>, hasSM<70>] in {
+ multiclass MATCH_ANY_SYNC<Intrinsic op, RegTyInfo t> {
+ def ii : BasicNVPTXInst<(outs B32:$dest), (ins t.Imm:$value, i32imm:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op imm:$mask, imm:$value))]>;
+ def ir : BasicNVPTXInst<(outs B32:$dest), (ins t.Imm:$value, B32:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op i32:$mask, imm:$value))]>;
+ def ri : BasicNVPTXInst<(outs B32:$dest), (ins t.RC:$value, i32imm:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op imm:$mask, t.Ty:$value))]>;
+ def rr : BasicNVPTXInst<(outs B32:$dest), (ins t.RC:$value, B32:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op i32:$mask, t.Ty:$value))]>;
+ }
-multiclass MATCH_ANY_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
- Operand ImmOp> {
- def ii : BasicNVPTXInst<(outs B32:$dest), (ins ImmOp:$value, i32imm:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp imm:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ir : BasicNVPTXInst<(outs B32:$dest), (ins ImmOp:$value, B32:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp i32:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ri : BasicNVPTXInst<(outs B32:$dest), (ins regclass:$value, i32imm:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp imm:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def rr : BasicNVPTXInst<(outs B32:$dest), (ins regclass:$value, B32:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp i32:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
+ defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<int_nvvm_match_any_sync_i32, I32RT>;
+ defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC<int_nvvm_match_any_sync_i64, I64RT>;
+
+ multiclass MATCH_ALLP_SYNC<RegTyInfo t, Intrinsic op> {
+ def ii : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.Imm:$value, i32imm:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op imm:$mask, imm:$value))]>;
+ def ir : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.Imm:$value, B32:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op i32:$mask, imm:$value))]>;
+ def ri : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.RC:$value, i32imm:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op imm:$mask, t.Ty:$value))]>;
+ def rr : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.RC:$value, B32:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op i32:$mask, t.Ty:$value))]>;
+ }
+ defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<I32RT, int_nvvm_match_all_sync_i32p>;
+ defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<I64RT, int_nvvm_match_all_sync_i64p>;
}
// activemask.b32
@@ -308,39 +327,6 @@ def ACTIVEMASK : BasicNVPTXInst<(outs B32:$dest), (ins),
[(set i32:$dest, (int_nvvm_activemask))]>,
Requires<[hasPTX<62>, hasSM<30>]>;
-defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<B32, "b32", int_nvvm_match_any_sync_i32,
- i32imm>;
-defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC<B64, "b64", int_nvvm_match_any_sync_i64,
- i64imm>;
-
-multiclass MATCH_ALLP_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
- Operand ImmOp> {
- def ii : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins ImmOp:$value, i32imm:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp imm:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ir : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins ImmOp:$value, B32:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp i32:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ri : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins regclass:$value, i32imm:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp imm:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def rr : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins regclass:$value, B32:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp i32:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
-}
-defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<B32, "b32", int_nvvm_match_all_sync_i32p,
- i32imm>;
-defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<B64, "b64", int_nvvm_match_all_sync_i64p,
- i64imm>;
-
multiclass REDUX_SYNC<string BinOp, string PTXType, Intrinsic Intrin> {
def : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src, B32:$mask),
"redux.sync." # BinOp # "." # PTXType,
@@ -381,24 +367,20 @@ defm REDUX_SYNC_FMAX_ABS_NAN: REDUX_SYNC_F<"max", ".abs", ".NaN">;
//-----------------------------------
// Explicit Memory Fence Functions
//-----------------------------------
-class MEMBAR<string StrOp, Intrinsic IntOP> :
- BasicNVPTXInst<(outs), (ins),
- StrOp, [(IntOP)]>;
+class NullaryInst<string StrOp, Intrinsic IntOP> :
+ BasicNVPTXInst<(outs), (ins), StrOp, [(IntOP)]>;
-def INT_MEMBAR_CTA : MEMBAR<"membar.cta", int_nvvm_membar_cta>;
-def INT_MEMBAR_GL : MEMBAR<"membar.gl", int_nvvm_membar_gl>;
-def INT_MEMBAR_SYS : MEMBAR<"membar.sys", int_nvvm_membar_sys>;
+def INT_MEMBAR_CTA : NullaryInst<"membar.cta", int_nvvm_membar_cta>;
+def INT_MEMBAR_GL : NullaryInst<"membar.gl", int_nvvm_membar_gl>;
+def INT_MEMBAR_SYS : NullaryInst<"membar.sys", int_nvvm_membar_sys>;
def INT_FENCE_SC_CLUSTER:
- MEMBAR<"fence.sc.cluster", int_nvvm_fence_sc_cluster>,
+ NullaryInst<"fence.sc.cluster", int_nvvm_fence_sc_cluster>,
Requires<[hasPTX<78>, hasSM<90>]>;
// Proxy fence (uni-directional)
-// fence.proxy.tensormap.release variants
-
class FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<string Scope, Intrinsic Intr> :
- BasicNVPTXInst<(outs), (ins),
- "fence.proxy.tensormap::generic.release." # Scope, [(Intr)]>,
+ NullaryInst<"fence.proxy.tensormap::generic.release." # Scope, Intr>,
Requires<[hasPTX<83>, hasSM<90>]>;
def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_CTA:
@@ -488,35 +470,31 @@ defm CP_ASYNC_CG_SHARED_GLOBAL_16 :
CP_ASYNC_SHARED_GLOBAL_I<"cg", "16", int_nvvm_cp_async_cg_shared_global_16,
int_nvvm_cp_async_cg_shared_global_16_s>;
-def CP_ASYNC_COMMIT_GROUP :
- BasicNVPTXInst<(outs), (ins), "cp.async.commit_group", [(int_nvvm_cp_async_commit_group)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+let Predicates = [hasPTX<70>, hasSM<80>] in {
+ def CP_ASYNC_COMMIT_GROUP :
+ NullaryInst<"cp.async.commit_group", int_nvvm_cp_async_commit_group>;
-def CP_ASYNC_WAIT_GROUP :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.wait_group",
- [(int_nvvm_cp_async_wait_group timm:$n)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+ def CP_ASYNC_WAIT_GROUP :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.wait_group",
+ [(int_nvvm_cp_async_wait_group timm:$n)]>;
-def CP_ASYNC_WAIT_ALL :
- BasicNVPTXInst<(outs), (ins), "cp.async.wait_all",
- [(int_nvvm_cp_async_wait_all)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+ def CP_ASYNC_WAIT_ALL :
+ NullaryInst<"cp.async.wait_all", int_nvvm_cp_async_wait_all>;
+}
-// cp.async.bulk variants of the commit/wait group
-def CP_ASYNC_BULK_COMMIT_GROUP :
- BasicNVPTXInst<(outs), (ins), "cp.async.bulk.commit_group",
- [(int_nvvm_cp_async_bulk_commit_group)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+let Predicates = [hasPTX<80>, hasSM<90>] in {
+ // cp.async.bulk variants of the commit/wait group
+ def CP_ASYNC_BULK_COMMIT_GROUP :
+ NullaryInst<"cp.async.bulk.commit_group", int_nvvm_cp_async_bulk_commit_group>;
-def CP_ASYNC_BULK_WAIT_GROUP :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group",
- [(int_nvvm_cp_async_bulk_wait_group timm:$n)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def CP_ASYNC_BULK_WAIT_GROUP :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group",
+ [(int_nvvm_cp_async_bulk_wait_group timm:$n)]>;
-def CP_ASYNC_BULK_WAIT_GROUP_READ :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group.read",
- [(int_nvvm_cp_async_bulk_wait_group_read timm:$n)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def CP_ASYNC_BULK_WAIT_GROUP_READ :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group.read",
+ [(int_nvvm_cp_async_bulk_wait_group_read timm:$n)]>;
+}
//------------------------------
// TMA Async Bulk Copy Functions
@@ -974,33 +952,30 @@ defm TMA_TENSOR_PF_TILE_GATHER4_2D : TMA_TENSOR_PREFETCH_INTR<5, "tile_gather4",
//Prefetch and Prefetchu
-class PREFETCH_INTRS<string InstName> :
- BasicNVPTXInst<(outs), (ins ADDR:$addr),
- InstName,
- [(!cast<Intrinsic>(!strconcat("int_nvvm_",
- !subst(".", "_", InstName))) addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
-
+let Predicates = [hasPTX<80>, hasSM<90>] in {
+ class PREFETCH_INTRS<string InstName> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ InstName,
+ [(!cast<Intrinsic>(!strconcat("int_nvvm_",
+ !subst(".", "_", InstName))) addr:$addr)]>;
-def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1">;
-def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2">;
-def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1">;
-def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1">;
-def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2">;
-def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2">;
+ def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1">;
+ def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2">;
+ def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1">;
+ def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1">;
+ def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2">;
+ def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2">;
-def PREFETCH_GLOBAL_L2_EVICT_NORMAL : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_normal",
- [(int_nvvm_prefetch_global_L2_evict_normal addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def PREFETCH_GLOBAL_L2_EVICT_NORMAL : BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "prefetch.global.L2::evict_normal",
+ [(int_nvvm_prefetch_global_L2_evict_normal addr:$addr)]>;
-def PREFETCH_GLOBAL_L2_EVICT_LAST : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_last",
- [(int_nvvm_prefetch_global_L2_evict_last addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def PREFETCH_GLOBAL_L2_EVICT_LAST : BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "prefetch.global.L2::evict_last",
+ [(int_nvvm_prefetch_global_L2_evict_last addr:$addr)]>;
-
-def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1">;
+ def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1">;
+}
//Applypriority intrinsics
class APPLYPRIORITY_L2_INTRS<string addrspace> :
@@ -1031,99 +1006,82 @@ def DISCARD_GLOBAL_L2 : DISCARD_L2_INTRS<"global">;
// MBarrier Functions
//-----------------------------------
-multiclass MBARRIER_INIT<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs), (ins ADDR:$addr, B32:$count),
- "mbarrier.init" # AddrSpace # ".b64",
- [(Intrin addr:$addr, i32:$count)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_INIT : MBARRIER_INIT<"", int_nvvm_mbarrier_init>;
-defm MBARRIER_INIT_SHARED : MBARRIER_INIT<".shared",
- int_nvvm_mbarrier_init_shared>;
-
-multiclass MBARRIER_INVAL<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "mbarrier.inval" # AddrSpace # ".b64",
- [(Intrin addr:$addr)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_INVAL : MBARRIER_INVAL<"", int_nvvm_mbarrier_inval>;
-defm MBARRIER_INVAL_SHARED : MBARRIER_INVAL<".shared",
- int_nvvm_mbarrier_inval_shared>;
-
-multiclass MBARRIER_ARRIVE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
- "mbarrier.arrive" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE : MBARRIER_ARRIVE<"", int_nvvm_mbarrier_arrive>;
-defm MBARRIER_ARRIVE_SHARED :
- MBARRIER_ARRIVE<".shared", int_nvvm_mbarrier_arrive_shared>;
-
-multiclass MBARRIER_ARRIVE_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state),
- (ins ADDR:$addr, B32:$count),
- "mbarrier.arrive.noComplete" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr, i32:$count))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_NOCOMPLETE :
- MBARRIER_ARRIVE_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_noComplete>;
-defm MBARRIER_ARRIVE_NOCOMPLETE_SHARED :
- MBARRIER_ARRIVE_NOCOMPLETE<".shared", int_nvvm_mbarrier_arrive_noComplete_shared>;
-
-multiclass MBARRIER_ARRIVE_DROP<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
- "mbarrier.arrive_drop" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_DROP :
- MBARRIER_ARRIVE_DROP<"", int_nvvm_mbarrier_arrive_drop>;
-defm MBARRIER_ARRIVE_DROP_SHARED :
- MBARRIER_ARRIVE_DROP<".shared", int_nvvm_mbarrier_arrive_drop_shared>;
-
-multiclass MBARRIER_ARRIVE_DROP_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state),
- (ins ADDR:$addr, B32:$count),
- "mbarrier.arrive_drop.noComplete" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr, i32:$count))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_DROP_NOCOMPLETE :
- MBARRIER_ARRIVE_DROP_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_drop_noComplete>;
-defm MBARRIER_ARRIVE_DROP_NOCOMPLETE_SHARED :
- MBARRIER_ARRIVE_DROP_NOCOMPLETE<".shared",
- int_nvvm_mbarrier_arrive_drop_noComplete_shared>;
-
-multiclass MBARRIER_TEST_WAIT<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B1:$res), (ins ADDR:$addr, B64:$state),
- "mbarrier.test_wait" # AddrSpace # ".b64",
- [(set i1:$res, (Intrin addr:$addr, i64:$state))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+let Predicates = [hasPTX<70>, hasSM<80>] in {
+ class MBARRIER_INIT<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr, B32:$count),
+ "mbarrier.init" # AddrSpace # ".b64",
+ [(Intrin addr:$addr, i32:$count)]>;
+
+ def MBARRIER_INIT : MBARRIER_INIT<"", int_nvvm_mbarrier_init>;
+ def MBARRIER_INIT_SHARED : MBARRIER_INIT<".shared",
+ int_nvvm_mbarrier_init_shared>;
+
+ class MBARRIER_INVAL<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "mbarrier.inval" # AddrSpace # ".b64",
+ [(Intrin addr:$addr)]>;
+
+ def MBARRIER_INVAL : MBARRIER_INVAL<"", int_nvvm_mbarrier_inval>;
+ def MBARRIER_INVAL_SHARED : MBARRIER_INVAL<".shared",
+ int_nvvm_mbarrier_inval_shared>;
+
+ class MBARRIER_ARRIVE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
+ "mbarrier.arrive" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr))]>;
+
+ def MBARRIER_ARRIVE : MBARRIER_ARRIVE<"", int_nvvm_mbarrier_arrive>;
+ def MBARRIER_ARRIVE_SHARED :
+ MBARRIER_ARRIVE<".shared", int_nvvm_mbarrier_arrive_shared>;
+
+ class MBARRIER_ARRIVE_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state),
+ (ins ADDR:$addr, B32:$count),
+ "mbarrier.arrive.noComplete" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr, i32:$count))]>;
+
+ def MBARRIER_ARRIVE_NOCOMPLETE :
+ MBARRIER_ARRIVE_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_noComplete>;
+ def MBARRIER_ARRIVE_NOCOMPLETE_SHARED :
+ MBARRIER_ARRIVE_NOCOMPLETE<".shared", int_nvvm_mbarrier_arrive_noComplete_shared>;
+
+ class MBARRIER_ARRIVE_DROP<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
+ "mbarrier.arrive_drop" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr))]>;
+
+ def MBARRIER_ARRIVE_DROP :
+ MBARRIER_ARRIVE_DROP<"", int_nvvm_mbarrier_arrive_drop>;
+ def MBARRIER_ARRIVE_DROP_SHARED :
+ MBARRIER_ARRIVE_DROP<".shared", int_nvvm_mbarrier_arrive_drop_shared>;
+
+ class MBARRIER_ARRIVE_DROP_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state),
+ (ins ADDR:$addr, B32:$count),
+ "mbarrier.arrive_drop.noComplete" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr, i32:$count))]>;
+
+ def MBARRIER_ARRIVE_DROP_NOCOMPLETE :
+ MBARRIER_ARRIVE_DROP_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_drop_noComplete>;
+ def MBARRIER_ARRIVE_DROP_NOCOMPLETE_SHARED :
+ MBARRIER_ARRIVE_DROP_NOCOMPLETE<".shared",
+ int_nvvm_mbarrier_arrive_drop_noComplete_shared>;
+
+ class MBARRIER_TEST_WAIT<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B1:$res), (ins ADDR:$addr, B64:$state),
+ "mbarrier.test_wait" # AddrSpace # ".b64",
+ [(set i1:$res, (Intrin addr:$addr, i64:$state))]>;
+
+ def MBARRIER_TEST_WAIT :
+ MBARRIER_TEST_WAIT<"", int_nvvm_mbarrier_test_wait>;
+ def MBARRIER_TEST_WAIT_SHARED :
+ MBARRIER_TEST_WAIT<".shared", int_nvvm_mbarrier_test_wait_shared>;
+
+ def MBARRIER_PENDING_COUNT :
+ BasicNVPTXInst<(outs B32:$res), (ins B64:$state),
+ "mbarrier.pending_count.b64",
+ [(set i32:$res, (int_nvvm_mbarrier_pending_count i64:$state))]>;
}
-
-defm MBARRIER_TEST_WAIT :
- MBARRIER_TEST_WAIT<"", int_nvvm_mbarrier_test_wait>;
-defm MBARRIER_TEST_WAIT_SHARED :
- MBARRIER_TEST_WAIT<".shared", int_nvvm_mbarrier_test_wait_shared>;
-
-class MBARRIER_PENDING_COUNT<Intrinsic Intrin> :
- BasicNVPTXInst<(outs B32:$res), (ins B64:$state),
- "mbarrier.pending_count.b64",
- [(set i32:$res, (Intrin i64:$state))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-
-def MBARRIER_PENDING_COUNT :
- MBARRIER_PENDING_COUNT<int_nvvm_mbarrier_pending_count>;
-
//-----------------------------------
// Math Functions
//-----------------------------------
@@ -1449,15 +1407,11 @@ defm ABS_F64 : F_ABS<"f64", F64RT, support_ftz = false>;
def fcopysign_nvptx : SDNode<"NVPTXISD::FCOPYSIGN", SDTFPBinOp>;
-def COPYSIGN_F :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$src0, B32:$src1),
- "copysign.f32",
- [(set f32:$dst, (fcopysign_nvptx f32:$src1, f32:$src0))]>;
-
-def COPYSIGN_D :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$src0, B64:$src1),
- "copysign.f64",
- [(set f64:$dst, (fcopysign_nvptx f64:$src1, f64:$src0))]>;
+foreach t = [F32RT, F64RT] in
+ def COPYSIGN_ # t :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src0, t.RC:$src1),
+ "copysign." # t.PtxType,
+ [(set t.Ty:$dst, (fcopysign_nvptx t.Ty:$src1, t.Ty:$src0))]>;
//
// Neg bf16, bf16x2
@@ -2255,38 +2209,35 @@ defm INT_PTX_SATOM_XOR : ATOM2_bitwise_impl<"xor">;
// Scalar
-class LDU_G<string TyStr, NVPTXRegClass regclass>
- : NVPTXInst<(outs regclass:$result), (ins ADDR:$src),
- "ldu.global." # TyStr # " \t$result, [$src];", []>;
+class LDU_G<NVPTXRegClass regclass>
+ : NVPTXInst<(outs regclass:$result), (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.b$fromWidth \t$result, [$src];", []>;
-def LDU_GLOBAL_i8 : LDU_G<"b8", B16>;
-def LDU_GLOBAL_i16 : LDU_G<"b16", B16>;
-def LDU_GLOBAL_i32 : LDU_G<"b32", B32>;
-def LDU_GLOBAL_i64 : LDU_G<"b64", B64>;
+def LDU_GLOBAL_i16 : LDU_G<B16>;
+def LDU_GLOBAL_i32 : LDU_G<B32>;
+def LDU_GLOBAL_i64 : LDU_G<B64>;
// vector
// Elementized vector ldu
-class VLDU_G_ELE_V2<string TyStr, NVPTXRegClass regclass>
+class VLDU_G_ELE_V2<NVPTXRegClass regclass>
: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
- (ins ADDR:$src),
- "ldu.global.v2." # TyStr # " \t{{$dst1, $dst2}}, [$src];", []>;
+ (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.v2.b$fromWidth \t{{$dst1, $dst2}}, [$src];", []>;
-class VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass>
- : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4), (ins ADDR:$src),
- "ldu.global.v4." # TyStr # " \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", []>;
+class VLDU_G_ELE_V4<NVPTXRegClass regclass>
+ : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.v4.b$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", []>;
-def LDU_GLOBAL_v2i8 : VLDU_G_ELE_V2<"b8", B16>;
-def LDU_GLOBAL_v2i16 : VLDU_G_ELE_V2<"b16", B16>;
-def LDU_GLOBAL_v2i32 : VLDU_G_ELE_V2<"b32", B32>;
-def LDU_GLOBAL_v2i64 : VLDU_G_ELE_V2<"b64", B64>;
+def LDU_GLOBAL_v2i16 : VLDU_G_ELE_V2<B16>;
+def LDU_GLOBAL_v2i32 : VLDU_G_ELE_V2<B32>;
+def LDU_GLOBAL_v2i64 : VLDU_G_ELE_V2<B64>;
-def LDU_GLOBAL_v4i8 : VLDU_G_ELE_V4<"b8", B16>;
-def LDU_GLOBAL_v4i16 : VLDU_G_ELE_V4<"b16", B16>;
-def LDU_GLOBAL_v4i32 : VLDU_G_ELE_V4<"b32", B32>;
+def LDU_GLOBAL_v4i16 : VLDU_G_ELE_V4<B16>;
+def LDU_GLOBAL_v4i32 : VLDU_G_ELE_V4<B32>;
//-----------------------------------
@@ -2327,12 +2278,10 @@ class VLDG_G_ELE_V8<NVPTXRegClass regclass> :
"ld.global.nc.v8.${Sign:sign}$fromWidth \t{{$dst1, $dst2, $dst3, $dst4, $dst5, $dst6, $dst7, $dst8}}, [$src];", []>;
// FIXME: 8-bit LDG should be fixed once LDG/LDU nodes are made into proper loads.
-def LD_GLOBAL_NC_v2i8 : VLDG_G_ELE_V2<B16>;
def LD_GLOBAL_NC_v2i16 : VLDG_G_ELE_V2<B16>;
def LD_GLOBAL_NC_v2i32 : VLDG_G_ELE_V2<B32>;
def LD_GLOBAL_NC_v2i64 : VLDG_G_ELE_V2<B64>;
-def LD_GLOBAL_NC_v4i8 : VLDG_G_ELE_V4<B16>;
def LD_GLOBAL_NC_v4i16 : VLDG_G_ELE_V4<B16>;
def LD_GLOBAL_NC_v4i32 : VLDG_G_ELE_V4<B32>;
@@ -2342,19 +2291,19 @@ def LD_GLOBAL_NC_v8i32 : VLDG_G_ELE_V8<B32>;
multiclass NG_TO_G<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
if Supports32 then
def "" : BasicNVPTXInst<(outs B32:$result), (ins B32:$src),
- "cvta." # Str # ".u32", []>, Requires<Preds>;
+ "cvta." # Str # ".u32">, Requires<Preds>;
def _64 : BasicNVPTXInst<(outs B64:$result), (ins B64:$src),
- "cvta." # Str # ".u64", []>, Requires<Preds>;
+ "cvta." # Str # ".u64">, Requires<Preds>;
}
multiclass G_TO_NG<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
if Supports32 then
def "" : BasicNVPTXInst<(outs B32:$result), (ins B32:$src),
- "cvta.to." # Str # ".u32", []>, Requires<Preds>;
+ "cvta.to." # Str # ".u32">, Requires<Preds>;
def _64 : BasicNVPTXInst<(outs B64:$result), (ins B64:$src),
- "cvta.to." # Str # ".u64", []>, Requires<Preds>;
+ "cvta.to." # Str # ".u64">, Requires<Preds>;
}
foreach space = ["local", "shared", "global", "const", "param"] in {
@@ -4614,9 +4563,9 @@ def INT_PTX_SREG_LANEMASK_GT :
PTX_READ_SREG_R32<"lanemask_gt", int_nvvm_read_ptx_sreg_lanemask_gt>;
let hasSideEffects = 1 in {
-def SREG_CLOCK : PTX_READ_SREG_R32<"clock", int_nvvm_read_ptx_sreg_clock>;
-def SREG_CLOCK64 : PTX_READ_SREG_R64<"clock64", int_nvvm_read_ptx_sreg_clock64>;
-def SREG_GLOBALTIMER : PTX_READ_SREG_R64<"globaltimer", int_nvvm_read_ptx_sreg_globaltimer>;
+ def SREG_CLOCK : PTX_READ_SREG_R32<"clock", int_nvvm_read_ptx_sreg_clock>;
+ def SREG_CLOCK64 : PTX_READ_SREG_R64<"clock64", int_nvvm_read_ptx_sreg_clock64>;
+ def SREG_GLOBALTIMER : PTX_READ_SREG_R64<"globaltimer", int_nvvm_read_ptx_sreg_globaltimer>;
}
def: Pat <(i64 (readcyclecounter)), (SREG_CLOCK64)>;
@@ -5096,37 +5045,36 @@ foreach mma = !listconcat(MMAs, WMMAs, MMA_LDSTs, LDMATRIXs, STMATRIXs) in
def : MMA_PAT<mma>;
multiclass MAPA<string suffix, Intrinsic Intr> {
- def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, B32:$b),
- "mapa" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a, i32:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _32i: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, i32imm:$b),
- "mapa" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a, imm:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, B32:$b),
- "mapa" # suffix # ".u64",
- [(set i64:$d, (Intr i64:$a, i32:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64i: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, i32imm:$b),
- "mapa" # suffix # ".u64",
- [(set i64:$d, (Intr i64:$a, imm:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+ let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, B32:$b),
+ "mapa" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a, i32:$b))]>;
+ def _32i: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, i32imm:$b),
+ "mapa" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a, imm:$b))]>;
+ def _64: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, B32:$b),
+ "mapa" # suffix # ".u64",
+ [(set i64:$d, (Intr i64:$a, i32:$b))]>;
+ def _64i: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, i32imm:$b),
+ "mapa" # suffix # ".u64",
+ [(set i64:$d, (Intr i64:$a, imm:$b))]>;
+ }
}
+
defm mapa : MAPA<"", int_nvvm_mapa>;
defm mapa_shared_cluster : MAPA<".shared::cluster", int_nvvm_mapa_shared_cluster>;
multiclass GETCTARANK<string suffix, Intrinsic Intr> {
- def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a),
- "getctarank" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64: BasicNVPTXInst<(outs B32:$d), (ins B64:$a),
- "getctarank" # suffix # ".u64",
- [(set i32:$d, (Intr i64:$a))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+ let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a),
+ "getctarank" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a))]>;
+ def _64: BasicNVPTXInst<(outs B32:$d), (ins B64:$a),
+ "getctarank" # suffix # ".u64",
+ [(set i32:$d, (Intr i64:$a))]>;
+ }
}
defm getctarank : GETCTARANK<"", int_nvvm_getctarank>;
@@ -5165,29 +5113,25 @@ def INT_NVVM_WGMMA_WAIT_GROUP_SYNC_ALIGNED : BasicNVPTXInst<(outs), (ins i64imm:
[(int_nvvm_wgmma_wait_group_sync_aligned timm:$n)]>, Requires<[hasSM90a, hasPTX<80>]>;
} // isConvergent = true
-def GRIDDEPCONTROL_LAUNCH_DEPENDENTS :
- BasicNVPTXInst<(outs), (ins),
- "griddepcontrol.launch_dependents",
- [(int_nvvm_griddepcontrol_launch_dependents)]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
-
-def GRIDDEPCONTROL_WAIT :
- BasicNVPTXInst<(outs), (ins),
- "griddepcontrol.wait",
- [(int_nvvm_griddepcontrol_wait)]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def GRIDDEPCONTROL_LAUNCH_DEPENDENTS :
+ BasicNVPTXInst<(outs), (ins), "griddepcontrol.launch_dependents",
+ [(int_nvvm_griddepcontrol_launch_dependents)]>;
+ def GRIDDEPCONTROL_WAIT :
+ BasicNVPTXInst<(outs), (ins), "griddepcontrol.wait",
+ [(int_nvvm_griddepcontrol_wait)]>;
+}
def INT_EXIT : BasicNVPTXInst<(outs), (ins), "exit", [(int_nvvm_exit)]>;
// Tcgen05 intrinsics
-let isConvergent = true in {
+let isConvergent = true, Predicates = [hasTcgen05Instructions] in {
multiclass TCGEN05_ALLOC_INTR<string AS, string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins ADDR:$dst, B32:$ncols),
"tcgen05.alloc.cta_group::" # num # ".sync.aligned" # AS # ".b32",
- [(Intr addr:$dst, B32:$ncols)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$dst, B32:$ncols)]>;
}
defm TCGEN05_ALLOC_CG1 : TCGEN05_ALLOC_INTR<"", "1", int_nvvm_tcgen05_alloc_cg1>;
@@ -5200,8 +5144,7 @@ multiclass TCGEN05_DEALLOC_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins B32:$tmem_addr, B32:$ncols),
"tcgen05.dealloc.cta_group::" # num # ".sync.aligned.b32",
- [(Intr B32:$tmem_addr, B32:$ncols)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr B32:$tmem_addr, B32:$ncols)]>;
}
defm TCGEN05_DEALLOC_CG1: TCGEN05_DEALLOC_INTR<"1", int_nvvm_tcgen05_dealloc_cg1>;
defm TCGEN05_DEALLOC_CG2: TCGEN05_DEALLOC_INTR<"2", int_nvvm_tcgen05_dealloc_cg2>;
@@ -5209,19 +5152,13 @@ defm TCGEN05_DEALLOC_CG2: TCGEN05_DEALLOC_INTR<"2", int_nvvm_tcgen05_dealloc_cg2
multiclass TCGEN05_RELINQ_PERMIT_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs), (ins),
"tcgen05.relinquish_alloc_permit.cta_group::" # num # ".sync.aligned",
- [(Intr)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr)]>;
}
defm TCGEN05_RELINQ_CG1: TCGEN05_RELINQ_PERMIT_INTR<"1", int_nvvm_tcgen05_relinq_alloc_permit_cg1>;
defm TCGEN05_RELINQ_CG2: TCGEN05_RELINQ_PERMIT_INTR<"2", int_nvvm_tcgen05_relinq_alloc_permit_cg2>;
-def tcgen05_wait_ld: BasicNVPTXInst<(outs), (ins), "tcgen05.wait::ld.sync.aligned",
- [(int_nvvm_tcgen05_wait_ld)]>,
- Requires<[hasTcgen05Instructions]>;
-
-def tcgen05_wait_st: BasicNVPTXInst<(outs), (ins), "tcgen05.wait::st.sync.aligned",
- [(int_nvvm_tcgen05_wait_st)]>,
- Requires<[hasTcgen05Instructions]>;
+def tcgen05_wait_ld: NullaryInst<"tcgen05.wait::ld.sync.aligned", int_nvvm_tcgen05_wait_ld>;
+def tcgen05_wait_st: NullaryInst<"tcgen05.wait::st.sync.aligned", int_nvvm_tcgen05_wait_st>;
multiclass TCGEN05_COMMIT_INTR<string AS, string num> {
defvar prefix = "tcgen05.commit.cta_group::" # num #".mbarrier::arrive::one.shared::cluster";
@@ -5232,12 +5169,10 @@ multiclass TCGEN05_COMMIT_INTR<string AS, string num> {
def "" : BasicNVPTXInst<(outs), (ins ADDR:$mbar),
prefix # ".b64",
- [(Intr addr:$mbar)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$mbar)]>;
def _MC : BasicNVPTXInst<(outs), (ins ADDR:$mbar, B16:$mc),
prefix # ".multicast::cluster.b64",
- [(IntrMC addr:$mbar, B16:$mc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrMC addr:$mbar, B16:$mc)]>;
}
defm TCGEN05_COMMIT_CG1 : TCGEN05_COMMIT_INTR<"", "1">;
@@ -5249,8 +5184,7 @@ multiclass TCGEN05_SHIFT_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr),
"tcgen05.shift.cta_group::" # num # ".down",
- [(Intr addr:$tmem_addr)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$tmem_addr)]>;
}
defm TCGEN05_SHIFT_CG1: TCGEN05_SHIFT_INTR<"1", int_nvvm_tcgen05_shift_down_cg1>;
defm TCGEN05_SHIFT_CG2: TCGEN05_SHIFT_INTR<"2", int_nvvm_tcgen05_shift_down_cg2>;
@@ -5270,13 +5204,11 @@ multiclass TCGEN05_CP_INTR<string shape, string src_fmt, string mc = ""> {
def _cg1 : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr, B64:$sdesc),
"tcgen05.cp.cta_group::1." # shape_mc_asm # fmt_asm,
- [(IntrCG1 addr:$tmem_addr, B64:$sdesc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrCG1 addr:$tmem_addr, B64:$sdesc)]>;
def _cg2 : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr, B64:$sdesc),
"tcgen05.cp.cta_group::2." # shape_mc_asm # fmt_asm,
- [(IntrCG2 addr:$tmem_addr, B64:$sdesc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrCG2 addr:$tmem_addr, B64:$sdesc)]>;
}
foreach src_fmt = ["", "b6x16_p32", "b4x16_p64"] in {
@@ -5289,17 +5221,13 @@ foreach src_fmt = ["", "b6x16_p32", "b4x16_p64"] in {
}
} // isConvergent
-let hasSideEffects = 1 in {
+let hasSideEffects = 1, Predicates = [hasTcgen05Instructions] in {
-def tcgen05_fence_before_thread_sync: BasicNVPTXInst<(outs), (ins),
- "tcgen05.fence::before_thread_sync",
- [(int_nvvm_tcgen05_fence_before_thread_sync)]>,
- Requires<[hasTcgen05Instructions]>;
+ def tcgen05_fence_before_thread_sync: NullaryInst<
+ "tcgen05.fence::before_thread_sync", int_nvvm_tcgen05_fence_before_thread_sync>;
-def tcgen05_fence_after_thread_sync: BasicNVPTXInst<(outs), (ins),
- "tcgen05.fence::after_thread_sync",
- [(int_nvvm_tcgen05_fence_after_thread_sync)]>,
- Requires<[hasTcgen05Instructions]>;
+ def tcgen05_fence_after_thread_sync: NullaryInst<
+ "tcgen05.fence::after_thread_sync", int_nvvm_tcgen05_fence_after_thread_sync>;
} // hasSideEffects
@@ -5392,17 +5320,17 @@ foreach shape = ["16x64b", "16x128b", "16x256b", "32x32b", "16x32bx2"] in {
// Bulk store instructions
def st_bulk_imm : TImmLeaf<i64, [{ return Imm == 0; }]>;
-def INT_NVVM_ST_BULK_GENERIC :
- BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
- "st.bulk",
- [(int_nvvm_st_bulk addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>,
- Requires<[hasSM<100>, hasPTX<86>]>;
+let Predicates = [hasSM<100>, hasPTX<86>] in {
+ def INT_NVVM_ST_BULK_GENERIC :
+ BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
+ "st.bulk",
+ [(int_nvvm_st_bulk addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>;
-def INT_NVVM_ST_BULK_SHARED_CTA:
- BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
- "st.bulk.shared::cta",
- [(int_nvvm_st_bulk_shared_cta addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>,
- Requires<[hasSM<100>, hasPTX<86>]>;
+ def INT_NVVM_ST_BULK_SHARED_CTA:
+ BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
+ "st.bulk.shared::cta",
+ [(int_nvvm_st_bulk_shared_cta addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>;
+}
//
// clusterlaunchcontorl Instructions
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index d40886a..2e81ab1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -38,14 +38,6 @@ foreach i = 0...4 in {
def R#i : NVPTXReg<"%r"#i>; // 32-bit
def RL#i : NVPTXReg<"%rd"#i>; // 64-bit
def RQ#i : NVPTXReg<"%rq"#i>; // 128-bit
- def H#i : NVPTXReg<"%h"#i>; // 16-bit float
- def HH#i : NVPTXReg<"%hh"#i>; // 2x16-bit float
-
- // Arguments
- def ia#i : NVPTXReg<"%ia"#i>;
- def la#i : NVPTXReg<"%la"#i>;
- def fa#i : NVPTXReg<"%fa"#i>;
- def da#i : NVPTXReg<"%da"#i>;
}
foreach i = 0...31 in {
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index d295f35..1dc485d 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -2159,8 +2159,115 @@ let AddedComplexity = 400, Predicates = [IsISA3_1, HasVSX] in {
(COPY_TO_REGCLASS $VRB, VSRC), 2)))>;
}
-class XXEvalPattern <dag pattern, bits<8> imm> :
- Pat<(v4i32 pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+// =============================================================================
+// XXEVAL Instruction Pattern Definitions
+// =============================================================================
+//
+// XXEVAL instruction performs 256 different logical operations on three vector
+// operands using an 8-bit immediate value to select the operation.
+// Format: xxeval XT, XA, XB, XC, IMM
+// For example:
+// Equivalent function A?xor(B,C):and(B,C) is performed by
+// xxeval XT, XA, XB, XC, 22
+//
+// REGISTER CLASS CONSTRAINTS:
+// - XXEVAL natively supports: VSRC register class [v4i32, v4f32, v2f64, v2i64]
+// - Other vector types [v16i8, v8i16] require COPY_TO_REGCLASS to/from VRRC
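+//
+// IMMEDIATE ENCODING (informal note, inferred from the patterns in this file):
+// the 8-bit IMM is the truth table of the 3-input function, with the row for
+// (A,B,C) = (0,0,0) taken as the most significant bit. For example, for
+// A ? xor(B,C) : and(B,C):
+//   A=0 rows use and(B,C): 0,0,0,1   A=1 rows use xor(B,C): 0,1,1,0
+//   IMM = 0b00010110 = 22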
+// =============================================================================
+
+class XXEvalPattern<dag pattern, bits<8> imm>
+ : Pat<(v4i32 pattern), (XXEVAL $vA, $vB, $vC, imm)> {}
+
+class XXEvalPatterns<ValueType Vt, dag InputPattern, bits<8> Imm>
+ : Pat<(Vt InputPattern),
+ !if(!or(!eq(Vt, v4i32), !eq(Vt, v2i64)),
+ // VSRC path: direct XXEVAL for v4i32 and v2i64
+ (XXEVAL $vA, $vB, $vC, Imm),
+ // VRRC path: wrap with COPY_TO_REGCLASS for other types
+ (COPY_TO_REGCLASS(XXEVAL(COPY_TO_REGCLASS Vt:$vA, VSRC),
+ (COPY_TO_REGCLASS Vt:$vB, VSRC),
+ (COPY_TO_REGCLASS Vt:$vC, VSRC), Imm),
+ VRRC))> {}
+
+// =============================================================================
+// PatFrags for Bitcast-Aware Vector bitwise Operations
+//
+// Each PatFrags defines TWO alternatives for the pattern matcher to choose from:
+// - Direct operation (for v4i32)
+// - Bitcast operation (for other types: v2i64, v16i8, v8i16)
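+//
+// For example, with node operands of type v2i64, VXor is intended to match
+// either of:
+//   (xor v2i64:$a, v2i64:$b)
+//   (v2i64 (bitconvert (xor (v4i32 (bitconvert v2i64:$a)),
+//                           (v4i32 (bitconvert v2i64:$b)))))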
+// =============================================================================
+
+// Basic Binary Operations
+def VAnd
+ : PatFrags<(ops node:$a, node:$b), [(and node:$a, node:$b),
+ (bitconvert(and
+ (v4i32(bitconvert node:$a)),
+ (v4i32(bitconvert node:$b))))]>;
+
+def VXor
+ : PatFrags<(ops node:$a, node:$b), [(xor node:$a, node:$b),
+ (bitconvert(xor
+ (v4i32(bitconvert node:$a)),
+ (v4i32(bitconvert node:$b))))]>;
+
+def VOr : PatFrags<(ops node:$a, node:$b), [(or node:$a, node:$b),
+ (bitconvert(or
+ (v4i32(bitconvert node:$a)),
+ (v4i32(bitconvert node:$b))))]>;
+
+def VNot
+ : PatFrags<(ops node:$a), [(vnot node:$a),
+ (bitconvert(vnot(v4i32(bitconvert node:$a))))]>;
+
+// Derived bitwise operations
+// Vector NOR operation (not(or))
+def VNor
+ : PatFrags<(ops node:$a, node:$b), [(vnot(or node:$a, node:$b)),
+ (bitconvert(vnot(or
+ (v4i32(bitconvert node:$a)),
+ (v4i32(bitconvert node:$b)))))]>;
+
+// Vector EQV operation (not(xor))
+def VEqv
+ : PatFrags<(ops node:$a, node:$b), [(vnot(xor node:$a, node:$b)),
+ (bitconvert(vnot(xor
+ (v4i32(bitconvert node:$a)),
+ (v4i32(bitconvert node:$b)))))]>;
+
+// =============================================================================
+// XXEVAL Ternary Pattern Multiclass: XXEvalTernarySelectAnd
+// This multiclass matches the ternary operation A ? f(B,C) : AND(B,C) and
+// emits the corresponding xxeval instruction with the matching immediate value.
+//
+// The patterns implement xxeval vector select operations where:
+// - A is the selector vector
+// - f(B,C) is the "true" case op on vectors B and C (XOR, NOR, EQV, or NOT)
+// - AND(B,C) is the "false" case op on vectors B and C
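+//
+// Note that all five immediates below (22, 24, 25, 26, 28) share the high
+// nibble 0b0001 contributed by AND(B,C) on the A=0 rows of the truth table;
+// they differ only in the low nibble produced by f(B,C) on the A=1 rows.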
+// =============================================================================
+multiclass XXEvalTernarySelectAnd<ValueType Vt> {
+ // Pattern: A ? XOR(B,C) : AND(B,C) XXEVAL immediate value: 22
+ def : XXEvalPatterns<
+ Vt, (vselect Vt:$vA, (VXor Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+ 22>;
+
+ // Pattern: A ? NOR(B,C) : AND(B,C) XXEVAL immediate value: 24
+ def : XXEvalPatterns<
+ Vt, (vselect Vt:$vA, (VNor Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+ 24>;
+
+ // Pattern: A ? EQV(B,C) : AND(B,C) XXEVAL immediate value: 25
+ def : XXEvalPatterns<
+ Vt, (vselect Vt:$vA, (VEqv Vt:$vB, Vt:$vC), (VAnd Vt:$vB, Vt:$vC)),
+ 25>;
+
+ // Pattern: A ? NOT(C) : AND(B,C) XXEVAL immediate value: 26
+ def : XXEvalPatterns<
+ Vt, (vselect Vt:$vA, (VNot Vt:$vC), (VAnd Vt:$vB, Vt:$vC)), 26>;
+
+ // Pattern: A ? NOT(B) : AND(B,C) XXEVAL immediate value: 28
+ def : XXEvalPatterns<
+ Vt, (vselect Vt:$vA, (VNot Vt:$vB), (VAnd Vt:$vB, Vt:$vC)), 28>;
+}
let Predicates = [PrefixInstrs, HasP10Vector] in {
let AddedComplexity = 400 in {
@@ -2270,6 +2377,11 @@ let Predicates = [PrefixInstrs, HasP10Vector] in {
// (xor A, (or B, C))
def : XXEvalPattern<(xor v4i32:$vA, (or v4i32:$vB, v4i32:$vC)), 120>;
+ // XXEVAL patterns for ternary operations.
+ foreach Ty = [v4i32, v2i64, v8i16, v16i8] in {
+ defm : XXEvalTernarySelectAnd<Ty>;
+ }
+
// Anonymous patterns to select prefixed VSX loads and stores.
// Load / Store f128
def : Pat<(f128 (load PDForm:$src)),
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 4c303a9..da6b95d 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -95,3 +95,7 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
// Same as CSR_XLEN_F64_V_Interrupt, but excluding X16-X31.
def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
(sequence "X%u", 16, 31))>;
+
+def CSR_RT_MostRegs : CalleeSavedRegs<(sub CSR_Interrupt, X6, X7, X28)>;
+def CSR_RT_MostRegs_RVE : CalleeSavedRegs<(sub CSR_RT_MostRegs,
+ (sequence "X%u", 16, 31))>;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 34910b7..f223fdbe 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -634,7 +634,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) {
// Transform (sra (shl X, C1) C2) with C1 < C2
// -> (SignedBitfieldExtract X, msb, lsb)
if (N0.getOpcode() == ISD::SHL) {
- auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01C)
return false;
@@ -750,7 +750,7 @@ bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
// Transform (sra (shl X, C1) C2) with C1 > C2
// -> (NDS.BFOS X, lsb, msb)
if (N0.getOpcode() == ISD::SHL) {
- auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N01C)
return false;
@@ -1191,7 +1191,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C)
// where C2 has 32 leading zeros and C3 trailing zeros.
SDNode *SRLIW = CurDAG->getMachineNode(
- RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1210,7 +1210,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// - without Zba a tablegen pattern applies the very same
// transform as we would have done here
SDNode *SLLI = CurDAG->getMachineNode(
- RISCV::SLLI, DL, VT, N0->getOperand(0),
+ RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LeadingZeros, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1239,7 +1239,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned TrailingZeros = llvm::countr_zero(Mask);
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
SDNode *SRLIW = CurDAG->getMachineNode(
- RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ RISCV::SRLIW, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(TrailingZeros, DL, VT));
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
@@ -1266,7 +1266,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (TrailingOnes == 32) {
SDNode *SRLI = CurDAG->getMachineNode(
Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
- N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+ N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
@@ -1279,19 +1279,19 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
SDNode *BEXTI = CurDAG->getMachineNode(
Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
- N0->getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
+ N0.getOperand(0), CurDAG->getTargetConstant(ShAmt, DL, VT));
ReplaceNode(Node, BEXTI);
return;
}
const unsigned Msb = TrailingOnes - 1;
const unsigned Lsb = ShAmt;
- if (tryUnsignedBitfieldExtract(Node, DL, VT, N0->getOperand(0), Msb, Lsb))
+ if (tryUnsignedBitfieldExtract(Node, DL, VT, N0.getOperand(0), Msb, Lsb))
return;
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
SDNode *SLLI =
- CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
@@ -1328,7 +1328,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
SDNode *SLLI =
- CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRAI = CurDAG->getMachineNode(
RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
@@ -2942,8 +2942,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
/// Similar to SelectAddrRegImm, except that the offset is restricted to uimm9.
bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
SDValue &Offset) {
- // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
- // a 9-bit immediate can be folded.
+ if (SelectAddrFrameIndex(Addr, Base, Offset))
+ return true;
SDLoc DL(Addr);
MVT VT = Addr.getSimpleValueType();
@@ -2953,8 +2953,8 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm9(SDValue Addr, SDValue &Base,
if (isUInt<9>(CVal)) {
Base = Addr.getOperand(0);
- // FIXME: Support FrameIndex. Need to teach eliminateFrameIndex that only
- // a 9-bit immediate can be folded.
+ if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
Offset = CurDAG->getSignedTargetConstant(CVal, DL, VT);
return true;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 607edd3..c0ada51 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2739,27 +2739,6 @@ bool RISCVTargetLowering::isLegalElementTypeForRVV(EVT ScalarTy) const {
}
}
-bool RISCVTargetLowering::isLegalLoadStoreElementTypeForRVV(
- EVT ScalarTy) const {
- if (!ScalarTy.isSimple())
- return false;
- switch (ScalarTy.getSimpleVT().SimpleTy) {
- case MVT::iPTR:
- return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::f16:
- case MVT::bf16:
- case MVT::f32:
- return true;
- case MVT::i64:
- case MVT::f64:
- return Subtarget.hasVInstructionsI64();
- default:
- return false;
- }
-}
unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {
return NumRepeatedDivisors;
@@ -20751,6 +20730,53 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return DAG.getAllOnesConstant(DL, VT);
return DAG.getConstant(0, DL, VT);
}
+ case Intrinsic::riscv_vsseg2_mask:
+ case Intrinsic::riscv_vsseg3_mask:
+ case Intrinsic::riscv_vsseg4_mask:
+ case Intrinsic::riscv_vsseg5_mask:
+ case Intrinsic::riscv_vsseg6_mask:
+ case Intrinsic::riscv_vsseg7_mask:
+ case Intrinsic::riscv_vsseg8_mask: {
+ SDValue Tuple = N->getOperand(2);
+ unsigned NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+
+ if (Subtarget.hasOptimizedSegmentLoadStore(NF) || !Tuple.hasOneUse() ||
+ Tuple.getOpcode() != RISCVISD::TUPLE_INSERT ||
+ !Tuple.getOperand(0).isUndef())
+ return SDValue();
+
+ SDValue Val = Tuple.getOperand(1);
+ unsigned Idx = Tuple.getConstantOperandVal(2);
+
+ unsigned SEW = Val.getValueType().getScalarSizeInBits();
+ assert(Log2_64(SEW) == N->getConstantOperandVal(6) &&
+ "Type mismatch without bitcast?");
+ unsigned Stride = SEW / 8 * NF;
+ unsigned Offset = SEW / 8 * Idx;
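+ // For instance, a vsseg3 of i32 elements (NF=3, SEW=32) storing only field
+ // Idx=1 becomes a strided store with Stride=12 and Offset=4 bytes.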
+
+ SDValue Ops[] = {
+ /*Chain=*/N->getOperand(0),
+ /*IntID=*/
+ DAG.getTargetConstant(Intrinsic::riscv_vsse_mask, DL, XLenVT),
+ /*StoredVal=*/Val,
+ /*Ptr=*/
+ DAG.getNode(ISD::ADD, DL, XLenVT, N->getOperand(3),
+ DAG.getConstant(Offset, DL, XLenVT)),
+ /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+ /*Mask=*/N->getOperand(4),
+ /*VL=*/N->getOperand(5)};
+
+ auto *OldMemSD = cast<MemIntrinsicSDNode>(N);
+ // Match getTgtMemIntrinsic for non-unit stride case
+ EVT MemVT = OldMemSD->getMemoryVT().getScalarType();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ OldMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+ SDVTList VTs = DAG.getVTList(MVT::Other);
+ return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VTs, Ops, MemVT,
+ MMO);
+ }
}
}
case ISD::EXPERIMENTAL_VP_REVERSE:
@@ -20843,6 +20869,68 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case RISCVISD::TUPLE_EXTRACT: {
+ EVT VT = N->getValueType(0);
+ SDValue Tuple = N->getOperand(0);
+ unsigned Idx = N->getConstantOperandVal(1);
+ if (!Tuple.hasOneUse() || Tuple.getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ break;
+
+ unsigned NF = 0;
+ switch (Tuple.getConstantOperandVal(1)) {
+ default:
+ break;
+ case Intrinsic::riscv_vlseg2_mask:
+ case Intrinsic::riscv_vlseg3_mask:
+ case Intrinsic::riscv_vlseg4_mask:
+ case Intrinsic::riscv_vlseg5_mask:
+ case Intrinsic::riscv_vlseg6_mask:
+ case Intrinsic::riscv_vlseg7_mask:
+ case Intrinsic::riscv_vlseg8_mask:
+ NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
+ break;
+ }
+
+ if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF))
+ break;
+
+ unsigned SEW = VT.getScalarSizeInBits();
+ assert(Log2_64(SEW) == Tuple.getConstantOperandVal(7) &&
+ "Type mismatch without bitcast?");
+ unsigned Stride = SEW / 8 * NF;
+ unsigned Offset = SEW / 8 * Idx;
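+ // e.g. extracting field Idx=2 from a vlseg4 of i16 elements (NF=4, SEW=16)
+ // becomes a strided load with Stride=8 and Offset=4 bytes.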
+
+ SDValue Ops[] = {
+ /*Chain=*/Tuple.getOperand(0),
+ /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT),
+ /*Passthru=*/Tuple.getOperand(2),
+ /*Ptr=*/
+ DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3),
+ DAG.getConstant(Offset, DL, XLenVT)),
+ /*Stride=*/DAG.getConstant(Stride, DL, XLenVT),
+ /*Mask=*/Tuple.getOperand(4),
+ /*VL=*/Tuple.getOperand(5),
+ /*Policy=*/Tuple.getOperand(6)};
+
+ auto *TupleMemSD = cast<MemIntrinsicSDNode>(Tuple);
+ // Match getTgtMemIntrinsic for non-unit stride case
+ EVT MemVT = TupleMemSD->getMemoryVT().getScalarType();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ TupleMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize);
+
+ SDVTList VTs = DAG.getVTList({VT, MVT::Other});
+ SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
+ Ops, MemVT, MMO);
+ DAG.ReplaceAllUsesOfValueWith(Tuple.getValue(1), Result.getValue(1));
+ return Result.getValue(0);
+ }
+ case RISCVISD::TUPLE_INSERT: {
+ // tuple_insert tuple, undef, idx -> tuple
+ if (N->getOperand(1).isUndef())
+ return N->getOperand(0);
+ break;
+ }
}
return SDValue();
@@ -22367,6 +22455,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
case CallingConv::C:
case CallingConv::Fast:
case CallingConv::SPIR_KERNEL:
+ case CallingConv::PreserveMost:
case CallingConv::GRAAL:
case CallingConv::RISCV_VectorCall:
#define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
@@ -22636,8 +22725,14 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
bool IsVarArg = CLI.IsVarArg;
EVT PtrVT = getPointerTy(DAG.getDataLayout());
MVT XLenVT = Subtarget.getXLenVT();
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
+ MachineFunction::CallSiteInfo CSInfo;
+
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -22895,6 +22990,9 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (CLI.CFIType)
Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
+ if (MF.getTarget().Options.EmitCallGraphSection && CB &&
+ CB->isIndirectCall())
+ DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
return Ret;
}
@@ -22902,6 +23000,10 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
if (CLI.CFIType)
Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
+
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
+
DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
Glue = Chain.getValue(1);
@@ -24260,7 +24362,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
return false;
EVT ScalarType = DataType.getScalarType();
- if (!isLegalLoadStoreElementTypeForRVV(ScalarType))
+ if (!isLegalElementTypeForRVV(ScalarType))
return false;
if (!Subtarget.enableUnalignedVectorMem() &&
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index a788c0b7..ca70c46 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -384,7 +384,6 @@ public:
bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
bool isLegalElementTypeForRVV(EVT ScalarTy) const;
- bool isLegalLoadStoreElementTypeForRVV(EVT ScalarTy) const;
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 6afc942d..03e6f43 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1510,21 +1510,6 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
let VLMul = MInfo.value;
}
-class VPseudoTernaryNoMask<VReg RetClass,
- RegisterClass Op1Class,
- DAGOperand Op2Class,
- string Constraint> :
- RISCVVPseudo<(outs RetClass:$rd),
- (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
- AVL:$vl, sew:$sew)> {
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
- let HasVLOp = 1;
- let HasSEWOp = 1;
-}
-
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index f391300..5265613 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1120,27 +1120,11 @@ let Predicates = [HasVendorXqcisync, IsRV32] in {
def QC_C_SYNCWF : QCIRVInst16CBSYNC<0b100, "qc.c.syncwf">;
def QC_C_SYNCWL : QCIRVInst16CBSYNC<0b101, "qc.c.syncwl">;
- let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
- def QC_C_DELAY : RVInst16CI<0b000, 0b10, (outs),
- (ins uimm5nonzero:$imm),
- "qc.c.delay", "$imm"> {
- let Inst{12} = 0;
- let Inst{11-7} = 0;
- let Inst{6-2} = imm{4-0};
- }
+ // qc.c.delay implemented as an alias, below
} // Predicates = [HasVendorXqcisync, IsRV32]
let Predicates = [HasVendorXqcisim, IsRV32] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
- def QC_PSYSCALLI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm10:$imm10),
- "qc.psyscalli", "$imm10"> {
- bits<10> imm10;
-
- let rs1 = 0;
- let rd = 0;
- let imm12 = {0b00, imm10};
- }
-
def QC_PPUTCI : RVInstI<0b010, OPC_OP_IMM, (outs), (ins uimm8:$imm8),
"qc.pputci", "$imm8"> {
bits<8> imm8;
@@ -1150,18 +1134,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
let imm12 = {0b0100, imm8};
}
- def QC_PCOREDUMP : QCISim_NONE<0b0110, "qc.pcoredump">;
- def QC_PPREGS : QCISim_NONE<0b0111, "qc.ppregs">;
- def QC_PPREG : QCISim_RS1<0b1000, "qc.ppreg">;
- def QC_PPUTC : QCISim_RS1<0b1001, "qc.pputc">;
- def QC_PPUTS : QCISim_RS1<0b1010, "qc.pputs">;
- def QC_PEXIT : QCISim_RS1<0b1011, "qc.pexit">;
- def QC_PSYSCALL : QCISim_RS1<0b1100, "qc.psyscall">;
-
- def QC_C_PTRACE : RVInst16CI<0b000, 0b10, (outs), (ins), "qc.c.ptrace", ""> {
- let rd = 0;
- let imm = 0;
- }
+ // The other instructions are all implemented as aliases, below
} // mayLoad = 0, mayStore = 0, hasSideEffects = 1
} // Predicates = [HasVendorXqcisim, IsRV32]
@@ -1218,6 +1191,27 @@ let EmitPriority = 0 in {
} // EmitPriority = 0
} // Predicates = [HasVendorXqcilo, IsRV32]
+let Predicates = [HasVendorXqcisim, IsRV32] in {
+let EmitPriority = 1 in {
+ def : InstAlias<"qc.c.ptrace", (C_SLLI X0, 0)>;
+
+ def : InstAlias<"qc.psyscalli $imm", (SLTI X0, X0, uimm10:$imm)>;
+ def : InstAlias<"qc.pcoredump", (SLTI X0, X0, 1536)>;
+ def : InstAlias<"qc.ppregs", (SLTI X0, X0, 1792)>;
+ def : InstAlias<"qc.ppreg $rs1", (SLTI X0, GPR:$rs1, -2048)>;
+ def : InstAlias<"qc.pputc $rs1", (SLTI X0, GPR:$rs1, -1792)>;
+ def : InstAlias<"qc.pputs $rs1", (SLTI X0, GPR:$rs1, -1536)>;
+ def : InstAlias<"qc.pexit $rs1", (SLTI X0, GPR:$rs1, -1280)>;
+ def : InstAlias<"qc.psyscall $rs1", (SLTI X0, GPR:$rs1, -1024)>;
+} // EmitPriority = 1
+} // Predicates = [HasVendorXqcisim, IsRV32]
+
+let Predicates = [HasVendorXqcisync, IsRV32] in {
+let EmitPriority = 1 in {
+ def : InstAlias<"qc.c.delay $imm", (C_SLLI X0, uimm5nonzero:$imm)>;
+}
+} // Predicates = [HasVendorXqcisync, IsRV32]
+
//===----------------------------------------------------------------------===//
// Pseudo-instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 3cbe668..726920e 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -32,7 +32,7 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
if (!isTypeLegal(VT))
return false;
- if (!isLegalLoadStoreElementTypeForRVV(VT.getScalarType()) ||
+ if (!isLegalElementTypeForRVV(VT.getScalarType()) ||
!allowsMemoryAccessForAlignment(VTy->getContext(), DL, VT, AddrSpace,
Alignment))
return false;
@@ -216,29 +216,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
return false;
- // If the segment load is going to be performed segment at a time anyways
- // and there's only one element used, use a strided load instead. This
- // will be equally fast, and create less vector register pressure.
- if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) {
- unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
- Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
- Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes);
- Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
- // For rv64, need to truncate i64 to i32 to match signature. As VL is at most
- // the number of active lanes (which is bounded by i32) this is safe.
- VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
- CallInst *CI =
- Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load,
- {VTy, BasePtr->getType(), Stride->getType()},
- {BasePtr, Stride, Mask, VL});
- Alignment = commonAlignment(Alignment, Indices[0] * ScalarSizeInBytes);
- CI->addParamAttr(0,
- Attribute::getWithAlignment(CI->getContext(), Alignment));
- Shuffles[0]->replaceAllUsesWith(CI);
- return true;
- };
-
CallInst *VlsegN = Builder.CreateIntrinsic(
FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
@@ -289,33 +266,6 @@ bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
return false;
- unsigned Index;
- // If the segment store only has one active lane (i.e. the interleave is
- // just a spread shuffle), we can use a strided store instead. This will
- // be equally fast, and create less vector register pressure.
- if (!Subtarget.hasOptimizedSegmentLoadStore(Factor) &&
- isSpreadMask(Mask, Factor, Index)) {
- unsigned ScalarSizeInBytes =
- DL.getTypeStoreSize(ShuffleVTy->getElementType());
- Value *Data = SVI->getOperand(0);
- Data = Builder.CreateExtractVector(VTy, Data, uint64_t(0));
- Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
- Value *Offset = ConstantInt::get(XLenTy, Index * ScalarSizeInBytes);
- Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
- // For rv64, need to truncate i64 to i32 to match signature. As VL is at
- // most the number of active lanes (which is bounded by i32) this is safe.
- VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
-
- CallInst *CI =
- Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_store,
- {VTy, BasePtr->getType(), Stride->getType()},
- {Data, BasePtr, Stride, LaneMask, VL});
- Alignment = commonAlignment(Alignment, Index * ScalarSizeInBytes);
- CI->addParamAttr(1,
- Attribute::getWithAlignment(CI->getContext(), Alignment));
- return true;
- }
-
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
Store->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy});
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 5404123..7e58b6f 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -68,6 +68,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
if (MF->getFunction().getCallingConv() == CallingConv::GHC)
return CSR_NoRegs_SaveList;
+ if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
+ return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
+ : CSR_RT_MostRegs_SaveList;
if (MF->getFunction().hasFnAttribute("interrupt")) {
if (Subtarget.hasVInstructions()) {
if (Subtarget.hasStdExtD())
@@ -573,6 +576,7 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int64_t Val = Offset.getFixed();
int64_t Lo12 = SignExtend64<12>(Val);
unsigned Opc = MI.getOpcode();
+
if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
// We chose to emit the canonical immediate sequence rather than folding
// the offset into the using add under the theory that doing so doesn't
@@ -585,6 +589,9 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
(Lo12 & 0b11111) != 0) {
// Prefetch instructions require the offset to be 32 byte aligned.
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+ } else if (Opc == RISCV::MIPS_PREFETCH && !isUInt<9>(Val)) {
+ // MIPS prefetch instructions require the offset to fit in an unsigned 9-bit field.
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
} else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
Opc == RISCV::PseudoRV32ZdinxSD) &&
Lo12 >= 2044) {
@@ -811,7 +818,13 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
if (CC == CallingConv::GHC)
return CSR_NoRegs_RegMask;
- switch (Subtarget.getTargetABI()) {
+ RISCVABI::ABI ABI = Subtarget.getTargetABI();
+ if (CC == CallingConv::PreserveMost) {
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return CSR_RT_MostRegs_RVE_RegMask;
+ return CSR_RT_MostRegs_RegMask;
+ }
+ switch (ABI) {
default:
llvm_unreachable("Unrecognized ABI");
case RISCVABI::ABI_ILP32E:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index fd634b5..0d5eb86 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1191,9 +1191,6 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
{Intrinsic::roundeven, MVT::f64, 9},
{Intrinsic::rint, MVT::f32, 7},
{Intrinsic::rint, MVT::f64, 7},
- {Intrinsic::lrint, MVT::i32, 1},
- {Intrinsic::lrint, MVT::i64, 1},
- {Intrinsic::llrint, MVT::i64, 1},
{Intrinsic::nearbyint, MVT::f32, 9},
{Intrinsic::nearbyint, MVT::f64, 9},
{Intrinsic::bswap, MVT::i16, 3},
@@ -1251,11 +1248,48 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
switch (ICA.getID()) {
case Intrinsic::lrint:
case Intrinsic::llrint:
- // We can't currently lower half or bfloat vector lrint/llrint.
- if (auto *VecTy = dyn_cast<VectorType>(ICA.getArgTypes()[0]);
- VecTy && VecTy->getElementType()->is16bitFPTy())
- return InstructionCost::getInvalid();
- [[fallthrough]];
+ case Intrinsic::lround:
+ case Intrinsic::llround: {
+ auto LT = getTypeLegalizationCost(RetTy);
+ Type *SrcTy = ICA.getArgTypes().front();
+ auto SrcLT = getTypeLegalizationCost(SrcTy);
+ if (ST->hasVInstructions() && LT.second.isVector()) {
+ SmallVector<unsigned, 2> Ops;
+ unsigned SrcEltSz = DL.getTypeSizeInBits(SrcTy->getScalarType());
+ unsigned DstEltSz = DL.getTypeSizeInBits(RetTy->getScalarType());
+ if (LT.second.getVectorElementType() == MVT::bf16) {
+ if (!ST->hasVInstructionsBF16Minimal())
+ return InstructionCost::getInvalid();
+ if (DstEltSz == 32)
+ Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFCVT_X_F_V};
+ else
+ Ops = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFWCVT_X_F_V};
+ } else if (LT.second.getVectorElementType() == MVT::f16 &&
+ !ST->hasVInstructionsF16()) {
+ if (!ST->hasVInstructionsF16Minimal())
+ return InstructionCost::getInvalid();
+ if (DstEltSz == 32)
+ Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFCVT_X_F_V};
+ else
+ Ops = {RISCV::VFWCVT_F_F_V, RISCV::VFWCVT_X_F_V};
+
+ } else if (SrcEltSz > DstEltSz) {
+ Ops = {RISCV::VFNCVT_X_F_W};
+ } else if (SrcEltSz < DstEltSz) {
+ Ops = {RISCV::VFWCVT_X_F_V};
+ } else {
+ Ops = {RISCV::VFCVT_X_F_V};
+ }
+
+ // We need to use the source LMUL in the case of a narrowing op, and the
+ // destination LMUL otherwise.
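+ // For instance, lrint from a double vector to an i32 vector narrows, so the
+ // f64 source type's legalization (and hence its LMUL) drives the cost below.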
+ if (SrcEltSz > DstEltSz)
+ return SrcLT.first *
+ getRISCVInstructionCost(Ops, SrcLT.second, CostKind);
+ return LT.first * getRISCVInstructionCost(Ops, LT.second, CostKind);
+ }
+ break;
+ }
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::trunc:
@@ -2593,18 +2627,17 @@ void RISCVTTIImpl::getUnrollingPreferences(
if (L->getNumBlocks() > 4)
return;
- // Don't unroll vectorized loops, including the remainder loop
- if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
- return;
-
// Scan the loop: don't unroll loops with calls as this could prevent
- // inlining.
+ // inlining. Don't unroll auto-vectorized loops either, though do allow
+ // unrolling of the scalar remainder.
+ bool IsVectorized = getBooleanLoopAttribute(L, "llvm.loop.isvectorized");
InstructionCost Cost = 0;
for (auto *BB : L->getBlocks()) {
for (auto &I : *BB) {
- // Initial setting - Don't unroll loops containing vectorized
- // instructions.
- if (I.getType()->isVectorTy())
+ // Both auto-vectorized loops and the scalar remainder have the
+ // isvectorized attribute, so differentiate between them by the presence
+ // of vector instructions.
+ if (IsVectorized && I.getType()->isVectorTy())
return;
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index f0510ec..d62d99c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -265,7 +265,7 @@ public:
if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
return false;
- return TLI->isLegalLoadStoreElementTypeForRVV(ElemType);
+ return TLI->isLegalElementTypeForRVV(ElemType);
}
bool isLegalMaskedLoad(Type *DataType, Align Alignment,
@@ -297,7 +297,7 @@ public:
if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
return false;
- return TLI->isLegalLoadStoreElementTypeForRVV(ElemType);
+ return TLI->isLegalElementTypeForRVV(ElemType);
}
bool isLegalMaskedGather(Type *DataType, Align Alignment) const override {
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index c1cc19b..050de3d 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -646,8 +646,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
if (!Src || Src->hasUnmodeledSideEffects() ||
Src->getParent() != MI.getParent() ||
!RISCVII::isFirstDefTiedToFirstUse(Src->getDesc()) ||
- !RISCVII::hasVLOp(Src->getDesc().TSFlags) ||
- !RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags))
+ !RISCVII::hasVLOp(Src->getDesc().TSFlags))
return false;
// Src's dest needs to have the same EEW as MI's input.
@@ -681,12 +680,14 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
*Src->getParent()->getParent()));
}
- // If MI was tail agnostic and the VL didn't increase, preserve it.
- int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
- if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
- RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
- Policy |= RISCVVType::TAIL_AGNOSTIC;
- Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+ if (RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags)) {
+ // If MI was tail agnostic and the VL didn't increase, preserve it.
+ int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
+ RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
+ Policy |= RISCVVType::TAIL_AGNOSTIC;
+ Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+ }
MRI->constrainRegClass(Src->getOperand(0).getReg(),
MRI->getRegClass(MI.getOperand(0).getReg()));
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
index 5cda6a0..7505507 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
@@ -74,17 +74,20 @@ class SPIRVLegalizePointerCast : public FunctionPass {
// Returns the loaded value.
Value *loadVectorFromVector(IRBuilder<> &B, FixedVectorType *SourceType,
FixedVectorType *TargetType, Value *Source) {
- // We expect the codegen to avoid doing implicit bitcast from a load.
- assert(TargetType->getElementType() == SourceType->getElementType());
- assert(TargetType->getNumElements() < SourceType->getNumElements());
-
+ assert(TargetType->getNumElements() <= SourceType->getNumElements());
LoadInst *NewLoad = B.CreateLoad(SourceType, Source);
buildAssignType(B, SourceType, NewLoad);
+ Value *AssignValue = NewLoad;
+ if (TargetType->getElementType() != SourceType->getElementType()) {
+ AssignValue = B.CreateIntrinsic(Intrinsic::spv_bitcast,
+ {TargetType, SourceType}, {NewLoad});
+ buildAssignType(B, TargetType, AssignValue);
+ }
SmallVector<int> Mask(/* Size= */ TargetType->getNumElements());
for (unsigned I = 0; I < TargetType->getNumElements(); ++I)
Mask[I] = I;
- Value *Output = B.CreateShuffleVector(NewLoad, NewLoad, Mask);
+ Value *Output = B.CreateShuffleVector(AssignValue, AssignValue, Mask);
buildAssignType(B, TargetType, Output);
return Output;
}
@@ -135,8 +138,9 @@ class SPIRVLegalizePointerCast : public FunctionPass {
Output = loadFirstValueFromAggregate(B, SVT->getElementType(),
OriginalOperand, LI);
}
- // Destination is a smaller vector than source.
+ // Destination is a smaller vector than the source, or has a different element type.
// - float3 v3 = vector4;
+ // - float4 v4 = int4;
else if (SVT && DVT)
Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand);
// Destination is the scalar type stored at the start of an aggregate.
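The load path above keeps the whole source vector, reinterprets the lanes when the element types differ, and then keeps only the destination's leading lanes. A rough generic-IR sketch of the same shape, assuming plain LLVM bitcasts rather than the spv_bitcast intrinsic and SPIR-V type bookkeeping the pass actually emits:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"

    // Hypothetical sketch: load the full source vector, reinterpret the lanes
    // if the destination element type differs, then keep only the leading
    // destination lanes via an identity-prefix shuffle mask.
    static llvm::Value *loadLeadingLanes(llvm::IRBuilder<> &B,
                                         llvm::FixedVectorType *SrcTy,
                                         llvm::FixedVectorType *DstTy,
                                         llvm::Value *Ptr) {
      using namespace llvm;
      Value *V = B.CreateLoad(SrcTy, Ptr);
      if (DstTy->getElementType() != SrcTy->getElementType()) {
        uint64_t SrcBits = SrcTy->getPrimitiveSizeInBits().getFixedValue();
        uint64_t EltBits =
            DstTy->getElementType()->getPrimitiveSizeInBits().getFixedValue();
        V = B.CreateBitCast(
            V, FixedVectorType::get(DstTy->getElementType(), SrcBits / EltBits));
      }
      SmallVector<int> Mask(DstTy->getNumElements());
      for (unsigned I = 0; I != Mask.size(); ++I)
        Mask[I] = I;
      return B.CreateShuffleVector(V, Mask);
    }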
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 721f64a..1995e0f 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -335,6 +335,8 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
getActionDefinitionsBuilder({G_SMULH, G_UMULH}).alwaysLegal();
}
+ getActionDefinitionsBuilder(G_IS_FPCLASS).custom();
+
getLegacyLegalizerInfo().computeTables();
verify(*ST.getInstrInfo());
}
@@ -355,9 +357,14 @@ static Register convertPtrToInt(Register Reg, LLT ConvTy, SPIRVType *SpvType,
bool SPIRVLegalizerInfo::legalizeCustom(
LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const {
- auto Opc = MI.getOpcode();
MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
- if (Opc == TargetOpcode::G_ICMP) {
+ switch (MI.getOpcode()) {
+ default:
+ // TODO: implement legalization for other opcodes.
+ return true;
+ case TargetOpcode::G_IS_FPCLASS:
+ return legalizeIsFPClass(Helper, MI, LocObserver);
+ case TargetOpcode::G_ICMP: {
assert(GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
auto &Op0 = MI.getOperand(2);
auto &Op1 = MI.getOperand(3);
@@ -378,6 +385,238 @@ bool SPIRVLegalizerInfo::legalizeCustom(
}
return true;
}
- // TODO: implement legalization for other opcodes.
+ }
+}
+
+// Note this code was copied from LegalizerHelper::lowerISFPCLASS and adjusted
+// to ensure that all instructions created during the lowering have SPIR-V types
+// assigned to them.
+bool SPIRVLegalizerInfo::legalizeIsFPClass(
+ LegalizerHelper &Helper, MachineInstr &MI,
+ LostDebugLocObserver &LocObserver) const {
+ auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+ FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());
+
+ auto &MIRBuilder = Helper.MIRBuilder;
+ auto &MF = MIRBuilder.getMF();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ Type *LLVMDstTy =
+ IntegerType::get(MIRBuilder.getContext(), DstTy.getScalarSizeInBits());
+ if (DstTy.isVector())
+ LLVMDstTy = VectorType::get(LLVMDstTy, DstTy.getElementCount());
+ SPIRVType *SPIRVDstTy = GR->getOrCreateSPIRVType(
+ LLVMDstTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
+ /*EmitIR*/ true);
+
+ unsigned BitSize = SrcTy.getScalarSizeInBits();
+ const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
+
+ LLT IntTy = LLT::scalar(BitSize);
+ Type *LLVMIntTy = IntegerType::get(MIRBuilder.getContext(), BitSize);
+ if (SrcTy.isVector()) {
+ IntTy = LLT::vector(SrcTy.getElementCount(), IntTy);
+ LLVMIntTy = VectorType::get(LLVMIntTy, SrcTy.getElementCount());
+ }
+ SPIRVType *SPIRVIntTy = GR->getOrCreateSPIRVType(
+ LLVMIntTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
+ /*EmitIR*/ true);
+
+ // Clang doesn't support capture of structured bindings:
+ LLT DstTyCopy = DstTy;
+ const auto assignSPIRVTy = [&](MachineInstrBuilder &&MI) {
+ // Assign this MI's (assumed only) destination to one of the two types we
+ // expect: either the G_IS_FPCLASS's destination type, or the integer type
+ // bitcast from the source type.
+ LLT MITy = MRI.getType(MI.getReg(0));
+ assert((MITy == IntTy || MITy == DstTyCopy) &&
+ "Unexpected LLT type while lowering G_IS_FPCLASS");
+ auto *SPVTy = MITy == IntTy ? SPIRVIntTy : SPIRVDstTy;
+ GR->assignSPIRVTypeToVReg(SPVTy, MI.getReg(0), MF);
+ return MI;
+ };
+
+ // Helper to build and assign a constant in one go
+ const auto buildSPIRVConstant = [&](LLT Ty, auto &&C) -> MachineInstrBuilder {
+ if (!Ty.isFixedVector())
+ return assignSPIRVTy(MIRBuilder.buildConstant(Ty, C));
+ auto ScalarC = MIRBuilder.buildConstant(Ty.getScalarType(), C);
+ assert((Ty == IntTy || Ty == DstTyCopy) &&
+ "Unexpected LLT type while lowering constant for G_IS_FPCLASS");
+ SPIRVType *VecEltTy = GR->getOrCreateSPIRVType(
+ (Ty == IntTy ? LLVMIntTy : LLVMDstTy)->getScalarType(), MIRBuilder,
+ SPIRV::AccessQualifier::ReadWrite,
+ /*EmitIR*/ true);
+ GR->assignSPIRVTypeToVReg(VecEltTy, ScalarC.getReg(0), MF);
+ return assignSPIRVTy(MIRBuilder.buildSplatBuildVector(Ty, ScalarC));
+ };
+
+ if (Mask == fcNone) {
+ MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 0));
+ MI.eraseFromParent();
+ return true;
+ }
+ if (Mask == fcAllFlags) {
+ MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 1));
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Note that rather than creating a COPY here (between a floating-point and
+ // integer type of the same size) we create a SPIR-V bitcast immediately. We
+ // can't create a G_BITCAST because the LLTs are the same, and we can't seem
+ // to correctly lower COPYs to SPIR-V bitcasts at this moment.
+ Register ResVReg = MRI.createGenericVirtualRegister(IntTy);
+ MRI.setRegClass(ResVReg, GR->getRegClass(SPIRVIntTy));
+ GR->assignSPIRVTypeToVReg(SPIRVIntTy, ResVReg, Helper.MIRBuilder.getMF());
+ auto AsInt = MIRBuilder.buildInstr(SPIRV::OpBitcast)
+ .addDef(ResVReg)
+ .addUse(GR->getSPIRVTypeID(SPIRVIntTy))
+ .addUse(SrcReg);
+ AsInt = assignSPIRVTy(std::move(AsInt));
+
+ // Various masks.
+ APInt SignBit = APInt::getSignMask(BitSize);
+ APInt ValueMask = APInt::getSignedMaxValue(BitSize); // All bits but sign.
+ APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit.
+ APInt ExpMask = Inf;
+ APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
+ APInt QNaNBitMask =
+ APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
+ APInt InversionMask = APInt::getAllOnes(DstTy.getScalarSizeInBits());
+
+ auto SignBitC = buildSPIRVConstant(IntTy, SignBit);
+ auto ValueMaskC = buildSPIRVConstant(IntTy, ValueMask);
+ auto InfC = buildSPIRVConstant(IntTy, Inf);
+ auto ExpMaskC = buildSPIRVConstant(IntTy, ExpMask);
+ auto ZeroC = buildSPIRVConstant(IntTy, 0);
+
+ auto Abs = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ValueMaskC));
+ auto Sign = assignSPIRVTy(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_NE, DstTy, AsInt, Abs));
+
+ auto Res = buildSPIRVConstant(DstTy, 0);
+
+ const auto appendToRes = [&](MachineInstrBuilder &&ToAppend) {
+ Res = assignSPIRVTy(
+ MIRBuilder.buildOr(DstTyCopy, Res, assignSPIRVTy(std::move(ToAppend))));
+ };
+
+ // Tests that involve more than one class should be processed first.
+ if ((Mask & fcFinite) == fcFinite) {
+ // finite(V) ==> abs(V) u< exp_mask
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, Abs,
+ ExpMaskC));
+ Mask &= ~fcFinite;
+ } else if ((Mask & fcFinite) == fcPosFinite) {
+ // finite(V) && V > 0 ==> V u< exp_mask
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, AsInt,
+ ExpMaskC));
+ Mask &= ~fcPosFinite;
+ } else if ((Mask & fcFinite) == fcNegFinite) {
+ // finite(V) && V < 0 ==> abs(V) u< exp_mask && signbit == 1
+ auto Cmp = assignSPIRVTy(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT,
+ DstTy, Abs, ExpMaskC));
+ appendToRes(MIRBuilder.buildAnd(DstTy, Cmp, Sign));
+ Mask &= ~fcNegFinite;
+ }
+
+ if (FPClassTest PartialCheck = Mask & (fcZero | fcSubnormal)) {
+ // fcZero | fcSubnormal => test all exponent bits are 0
+ // TODO: Handle sign bit specific cases
+ // TODO: Handle inverted case
+ if (PartialCheck == (fcZero | fcSubnormal)) {
+ auto ExpBits = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ExpMaskC));
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
+ ExpBits, ZeroC));
+ Mask &= ~PartialCheck;
+ }
+ }
+
+ // Check for individual classes.
+ if (FPClassTest PartialCheck = Mask & fcZero) {
+ if (PartialCheck == fcPosZero)
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
+ AsInt, ZeroC));
+ else if (PartialCheck == fcZero)
+ appendToRes(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, ZeroC));
+ else // fcNegZero
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
+ AsInt, SignBitC));
+ }
+
+ if (FPClassTest PartialCheck = Mask & fcSubnormal) {
+ // issubnormal(V) ==> unsigned(abs(V) - 1) u< (all mantissa bits set)
+ // issubnormal(V) && V>0 ==> unsigned(V - 1) u< (all mantissa bits set)
+ auto V = (PartialCheck == fcPosSubnormal) ? AsInt : Abs;
+ auto OneC = buildSPIRVConstant(IntTy, 1);
+ auto VMinusOne = MIRBuilder.buildSub(IntTy, V, OneC);
+ auto SubnormalRes = assignSPIRVTy(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, VMinusOne,
+ buildSPIRVConstant(IntTy, AllOneMantissa)));
+ if (PartialCheck == fcNegSubnormal)
+ SubnormalRes = MIRBuilder.buildAnd(DstTy, SubnormalRes, Sign);
+ appendToRes(std::move(SubnormalRes));
+ }
+
+ if (FPClassTest PartialCheck = Mask & fcInf) {
+ if (PartialCheck == fcPosInf)
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
+ AsInt, InfC));
+ else if (PartialCheck == fcInf)
+ appendToRes(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, InfC));
+ else { // fcNegInf
+ APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
+ auto NegInfC = buildSPIRVConstant(IntTy, NegInf);
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
+ AsInt, NegInfC));
+ }
+ }
+
+ if (FPClassTest PartialCheck = Mask & fcNan) {
+ auto InfWithQnanBitC = buildSPIRVConstant(IntTy, Inf | QNaNBitMask);
+ if (PartialCheck == fcNan) {
+ // isnan(V) ==> abs(V) u> int(inf)
+ appendToRes(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
+ } else if (PartialCheck == fcQNan) {
+ // isquiet(V) ==> abs(V) u>= (unsigned(Inf) | quiet_bit)
+ appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGE, DstTy, Abs,
+ InfWithQnanBitC));
+ } else { // fcSNan
+ // issignaling(V) ==> abs(V) u> unsigned(Inf) &&
+ // abs(V) u< (unsigned(Inf) | quiet_bit)
+ auto IsNan = assignSPIRVTy(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
+ auto IsNotQnan = assignSPIRVTy(MIRBuilder.buildICmp(
+ CmpInst::Predicate::ICMP_ULT, DstTy, Abs, InfWithQnanBitC));
+ appendToRes(MIRBuilder.buildAnd(DstTy, IsNan, IsNotQnan));
+ }
+ }
+
+ if (FPClassTest PartialCheck = Mask & fcNormal) {
+ // isnormal(V) ==> (0 u< exp u< max_exp) ==> (unsigned(exp-1) u<
+ // (max_exp-1))
+ APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
+ auto ExpMinusOne = assignSPIRVTy(
+ MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB)));
+ APInt MaxExpMinusOne = ExpMask - ExpLSB;
+ auto NormalRes = assignSPIRVTy(
+ MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne,
+ buildSPIRVConstant(IntTy, MaxExpMinusOne)));
+ if (PartialCheck == fcNegNormal)
+ NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, Sign);
+ else if (PartialCheck == fcPosNormal) {
+ auto PosSign = assignSPIRVTy(MIRBuilder.buildXor(
+ DstTy, Sign, buildSPIRVConstant(DstTy, InversionMask)));
+ NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, PosSign);
+ }
+ appendToRes(std::move(NormalRes));
+ }
+
+ MIRBuilder.buildCopy(DstReg, Res);
+ MI.eraseFromParent();
return true;
}
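The bit tests built above are the usual IEEE-754 classification tricks on the integer representation of the value. A small standalone sketch for binary32, with hypothetical helper names and nothing beyond memcpy assumed:

    #include <cstdint>
    #include <cstring>

    // Classification on the raw bits of an IEEE-754 binary32 value.
    // 0x7f800000 is the exponent mask (== +inf), 0x007fffff the mantissa mask.
    static bool isNanBits(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));
      uint32_t Abs = Bits & 0x7fffffffu;   // clear the sign bit
      return Abs > 0x7f800000u;            // isnan(V) ==> abs(V) u> int(inf)
    }
    static bool isInfBits(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));
      return (Bits & 0x7fffffffu) == 0x7f800000u;
    }
    static bool isSubnormalBits(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));
      uint32_t Abs = Bits & 0x7fffffffu;
      return Abs - 1u < 0x007fffffu;       // unsigned(abs(V) - 1) u< all-mantissa
    }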
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
index 6335f21..eeefa42 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
@@ -30,6 +30,10 @@ public:
bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const override;
SPIRVLegalizerInfo(const SPIRVSubtarget &ST);
+
+private:
+ bool legalizeIsFPClass(LegalizerHelper &Helper, MachineInstr &MI,
+ LostDebugLocObserver &LocObserver) const;
};
} // namespace llvm
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVMACHINELEGALIZER_H
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
index 43bf6e9..60c4e2d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
@@ -59,6 +59,8 @@ public:
Intrinsic::ID IID) const override;
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
Value *NewV) const override;
+
+ bool allowVectorElementIndexingUsingGEP() const override { return false; }
};
} // namespace llvm
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index e30d723..fb0a47d 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -9044,7 +9044,7 @@ static unsigned detectEvenOddMultiplyOperand(const SelectionDAG &DAG,
if (unsigned(ShuffleMask[Elt]) != 2 * Elt)
CanUseEven = false;
if (unsigned(ShuffleMask[Elt]) != 2 * Elt + 1)
- CanUseEven = true;
+ CanUseOdd = false;
}
Op = Op.getOperand(0);
if (CanUseEven)
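The fix above makes the even and odd candidates fail independently: an element that does not match the odd-lane pattern must clear the odd flag rather than re-enable the even one. The two mask shapes being tested, as a hypothetical standalone check:

    #include "llvm/ADT/ArrayRef.h"

    // An "even" operand uses source lanes 0, 2, 4, ...; an "odd" operand uses
    // lanes 1, 3, 5, ...
    static bool selectsEvenLanes(llvm::ArrayRef<int> Mask) {
      for (unsigned Elt = 0; Elt < Mask.size(); ++Elt)
        if (Mask[Elt] != int(2 * Elt))
          return false;
      return true;
    }
    static bool selectsOddLanes(llvm::ArrayRef<int> Mask) {
      for (unsigned Elt = 0; Elt < Mask.size(); ++Elt)
        if (Mask[Elt] != int(2 * Elt + 1))
          return false;
      return true;
    }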
diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.td b/llvm/lib/Target/WebAssembly/WebAssembly.td
index a606209..089be5f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssembly.td
+++ b/llvm/lib/Target/WebAssembly/WebAssembly.td
@@ -49,6 +49,8 @@ def FeatureFP16 :
SubtargetFeature<"fp16", "HasFP16", "true",
"Enable FP16 instructions">;
+def FeatureGC : SubtargetFeature<"gc", "HasGC", "true", "Enable wasm gc">;
+
def FeatureMultiMemory :
SubtargetFeature<"multimemory", "HasMultiMemory", "true",
"Enable multiple memories">;
@@ -71,7 +73,6 @@ def FeatureReferenceTypes :
SubtargetFeature<"reference-types", "HasReferenceTypes", "true",
"Enable reference types">;
-def FeatureGC : SubtargetFeature<"gc", "HasGC", "true", "Enable wasm gc">;
def FeatureRelaxedSIMD :
SubtargetFeature<"relaxed-simd", "SIMDLevel", "RelaxedSIMD",
"Enable relaxed-simd instructions">;
@@ -139,10 +140,10 @@ def : ProcessorModel<"lime1", NoSchedModel,
def : ProcessorModel<"bleeding-edge", NoSchedModel,
[FeatureAtomics, FeatureBulkMemory, FeatureBulkMemoryOpt,
FeatureCallIndirectOverlong, FeatureExceptionHandling,
- FeatureExtendedConst, FeatureFP16, FeatureMultiMemory,
- FeatureMultivalue, FeatureMutableGlobals,
+ FeatureExtendedConst, FeatureFP16, FeatureGC,
+ FeatureMultiMemory, FeatureMultivalue, FeatureMutableGlobals,
FeatureNontrappingFPToInt, FeatureRelaxedSIMD,
- FeatureReferenceTypes, FeatureGC, FeatureSIMD128,
+ FeatureReferenceTypes, FeatureSIMD128,
FeatureSignExt, FeatureTailCall]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
index 2b632fd..13d048a 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -50,6 +50,9 @@ def HasFP16 :
Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<(all_of FeatureFP16), "fp16">;
+def HasGC : Predicate<"Subtarget->hasGC()">,
+ AssemblerPredicate<(all_of FeatureGC), "gc">;
+
def HasMultiMemory :
Predicate<"Subtarget->hasMultiMemory()">,
AssemblerPredicate<(all_of FeatureMultiMemory), "multimemory">;
@@ -76,9 +79,6 @@ def HasReferenceTypes :
Predicate<"Subtarget->hasReferenceTypes()">,
AssemblerPredicate<(all_of FeatureReferenceTypes), "reference-types">;
-def HasGC : Predicate<"Subtarget->hasGC()">,
- AssemblerPredicate<(all_of FeatureGC), "gc">;
-
def HasRelaxedSIMD :
Predicate<"Subtarget->hasRelaxedSIMD()">,
AssemblerPredicate<(all_of FeatureRelaxedSIMD), "relaxed-simd">;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index f814274..2f88bbb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -46,12 +46,12 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
bool HasExceptionHandling = false;
bool HasExtendedConst = false;
bool HasFP16 = false;
+ bool HasGC = false;
bool HasMultiMemory = false;
bool HasMultivalue = false;
bool HasMutableGlobals = false;
bool HasNontrappingFPToInt = false;
bool HasReferenceTypes = false;
- bool HasGC = false;
bool HasSignExt = false;
bool HasTailCall = false;
bool HasWideArithmetic = false;
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 1bf9f8b..f9bd233 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -104,6 +104,7 @@ add_llvm_target(X86CodeGen ${sources}
IRPrinter
Instrumentation
MC
+ ObjCARC
ProfileData
Scalar
SelectionDAG
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 0ff7f23..067bd43 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3673,6 +3673,12 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
CLI.NumResultRegs = RVLocs.size();
CLI.Call = MIB;
+ // Add call site info for call graph section.
+ if (TM.Options.EmitCallGraphSection && CB && CB->isIndirectCall()) {
+ MachineFunction::CallSiteInfo CSInfo(*CB);
+ MF->addCallSiteInfo(CLI.Call, std::move(CSInfo));
+ }
+
return true;
}
@@ -4042,6 +4048,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
MO.setReg(IndexReg);
}
+ if (MI->isCall())
+ FuncInfo.MF->moveAdditionalCallInfo(MI, Result);
Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
MachineBasicBlock::iterator I(MI);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 11ab8dc..bbbb1d9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58071,14 +58071,24 @@ static SDValue combineX86CloadCstore(SDNode *N, SelectionDAG &DAG) {
Ops[3] = Op1.getOperand(0);
Ops[4] = Op1.getOperand(1);
} else if (Op1.getOpcode() == ISD::AND && Sub.getValue(0).use_empty()) {
+ SDValue Src = Op1;
+ SDValue Op10 = Op1.getOperand(0);
+ if (Op10.getOpcode() == ISD::XOR && isAllOnesConstant(Op10.getOperand(1))) {
+ // res, flags2 = sub 0, (and (xor X, -1), Y)
+ // cload/cstore ..., cond_ne, flag2
+ // ->
+ // res, flags2 = sub 0, (and X, Y)
+ // cload/cstore ..., cond_e, flag2
+ Src = DAG.getNode(ISD::AND, DL, Op1.getValueType(), Op10.getOperand(0),
+ Op1.getOperand(1));
+ Ops[3] = DAG.getTargetConstant(X86::COND_E, DL, MVT::i8);
+ }
// res, flags2 = sub 0, (and X, Y)
- // cload/cstore ..., cond_ne, flag2
+ // cload/cstore ..., cc, flag2
// ->
- // res, flags2 = and X, Y
- // cload/cstore ..., cond_ne, flag2
- Ops[4] = DAG.getNode(X86ISD::AND, DL, Sub->getVTList(), Op1.getOperand(0),
- Op1.getOperand(1))
- .getValue(1);
+ // res, flags2 = cmp (and X, Y), 0
+ // cload/cstore ..., cc, flag2
+ Ops[4] = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Src, Sub.getOperand(0));
} else {
return SDValue();
}
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index b4639ac..5862c7e 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2060,6 +2060,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (CallConv == CallingConv::X86_INTR)
report_fatal_error("X86 interrupts may not be called directly");
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
if (IsIndirectCall && !IsWin64 &&
M->getModuleFlag("import-call-optimization"))
errorUnsupported(DAG, dl,
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index 78bd5b4..7e09d30 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -587,8 +587,9 @@ StringRef sys::detail::getHostCPUNameForBPF() {
#endif
}
-#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
/// the specified arguments. If we can't run cpuid on the host, return true.
@@ -1853,8 +1854,9 @@ VendorSignatures getVendorSignature(unsigned *MaxLeaf) {
} // namespace llvm
#endif
-#if defined(__i386__) || defined(_M_IX86) || \
- defined(__x86_64__) || defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
StringMap<bool> sys::getHostCPUFeatures() {
unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
unsigned MaxLevel;
@@ -2147,7 +2149,8 @@ StringMap<bool> sys::getHostCPUFeatures() {
return Features;
}
-#elif defined(_WIN32) && (defined(__aarch64__) || defined(_M_ARM64))
+#elif defined(_WIN32) && (defined(__aarch64__) || defined(_M_ARM64) || \
+ defined(__arm64ec__) || defined(_M_ARM64EC))
StringMap<bool> sys::getHostCPUFeatures() {
StringMap<bool> Features;
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index e5c896f..126be71 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -446,6 +446,7 @@ void AMDGPU::fillAMDGPUFeatureMap(StringRef GPU, const Triple &T,
Features["tanh-insts"] = true;
Features["transpose-load-f4f6-insts"] = true;
Features["bf16-trans-insts"] = true;
+ Features["bf16-cvt-insts"] = true;
Features["fp8-conversion-insts"] = true;
Features["fp8e5m3-insts"] = true;
Features["permlane16-swap"] = true;
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 7af5ba4..40a7f80 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -458,29 +458,19 @@ static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
// Check if this array of constants represents a cttz table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent cttz results.
-static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
- uint64_t Shift, uint64_t InputBits) {
- unsigned Length = Table.getNumElements();
- if (Length < InputBits || Length > InputBits * 2)
- return false;
-
- APInt Mask = APInt::getBitsSetFrom(InputBits, Shift);
- unsigned Matched = 0;
-
- for (unsigned i = 0; i < Length; i++) {
- uint64_t Element = Table.getElementAsInteger(i);
- if (Element >= InputBits)
- continue;
-
- // Check if \p Element matches a concrete answer. It could fail for some
- // elements that are never accessed, so we keep iterating over each element
- // from the table. The number of matched elements should be equal to the
- // number of potential right answers which is \p InputBits actually.
- if ((((Mul << Element) & Mask.getZExtValue()) >> Shift) == i)
- Matched++;
+static bool isCTTZTable(Constant *Table, const APInt &Mul, const APInt &Shift,
+ const APInt &AndMask, Type *AccessTy,
+ unsigned InputBits, const APInt &GEPIdxFactor,
+ const DataLayout &DL) {
+ for (unsigned Idx = 0; Idx < InputBits; Idx++) {
+ APInt Index = (APInt(InputBits, 1).shl(Idx) * Mul).lshr(Shift) & AndMask;
+ ConstantInt *C = dyn_cast_or_null<ConstantInt>(
+ ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
+ if (!C || C->getValue() != Idx)
+ return false;
}
- return Matched == InputBits;
+ return true;
}
// Try to recognize table-based ctz implementation.
@@ -495,6 +485,11 @@ static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
// this can be lowered to `cttz` instruction.
// There is also a special case when the element is 0.
//
+// The (x & -x) isolates the lowest set bit. The multiplier is a de Bruijn
+// sequence, so each possible bit pattern appears in it exactly once. The shift
+// extracts the top bits after the multiply, and that index into the table
+// gives the number of trailing zeros in the original number.
+//
// Here are some examples of LLVM IR for a 64-bit target:
//
// CASE 1:
@@ -536,8 +531,8 @@ static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
// i64 %shr
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
-// All this can be lowered to @llvm.cttz.i32/64 intrinsic.
-static bool tryToRecognizeTableBasedCttz(Instruction &I) {
+// All these can be lowered to @llvm.cttz.i32/64 intrinsics.
+static bool tryToRecognizeTableBasedCttz(Instruction &I, const DataLayout &DL) {
LoadInst *LI = dyn_cast<LoadInst>(&I);
if (!LI)
return false;
@@ -547,53 +542,47 @@ static bool tryToRecognizeTableBasedCttz(Instruction &I) {
return false;
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
- if (!GEP || !GEP->hasNoUnsignedSignedWrap() || GEP->getNumIndices() != 2)
- return false;
-
- if (!GEP->getSourceElementType()->isArrayTy())
- return false;
-
- uint64_t ArraySize = GEP->getSourceElementType()->getArrayNumElements();
- if (ArraySize != 32 && ArraySize != 64)
+ if (!GEP || !GEP->hasNoUnsignedSignedWrap())
return false;
GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
return false;
- ConstantDataArray *ConstData =
- dyn_cast<ConstantDataArray>(GVTable->getInitializer());
- if (!ConstData)
- return false;
-
- if (!match(GEP->idx_begin()->get(), m_ZeroInt()))
+ unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
+ APInt ModOffset(BW, 0);
+ SmallMapVector<Value *, APInt, 4> VarOffsets;
+ if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
+ VarOffsets.size() != 1 || ModOffset != 0)
return false;
+ auto [GepIdx, GEPScale] = VarOffsets.front();
- Value *Idx2 = std::next(GEP->idx_begin())->get();
Value *X1;
- uint64_t MulConst, ShiftConst;
- // FIXME: 64-bit targets have `i64` type for the GEP index, so this match will
- // probably fail for other (e.g. 32-bit) targets.
- if (!match(Idx2, m_ZExtOrSelf(
- m_LShr(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)),
- m_ConstantInt(MulConst)),
- m_ConstantInt(ShiftConst)))))
+ const APInt *MulConst, *ShiftConst, *AndCst = nullptr;
+ // Check that the gep variable index is ((x & -x) * MulConst) >> ShiftConst.
+ // This might be extended to the pointer index type, and if the gep index type
+ // has been replaced with an i8 then a new And (and different ShiftConst) will
+ // be present.
+ auto MatchInner = m_LShr(
+ m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), m_APInt(MulConst)),
+ m_APInt(ShiftConst));
+ if (!match(GepIdx, m_CastOrSelf(MatchInner)) &&
+ !match(GepIdx, m_CastOrSelf(m_And(MatchInner, m_APInt(AndCst)))))
return false;
unsigned InputBits = X1->getType()->getScalarSizeInBits();
- if (InputBits != 32 && InputBits != 64)
- return false;
-
- // Shift should extract top 5..7 bits.
- if (InputBits - Log2_32(InputBits) != ShiftConst &&
- InputBits - Log2_32(InputBits) - 1 != ShiftConst)
+ if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
return false;
- if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits))
+ if (!GEPScale.isIntN(InputBits) ||
+ !isCTTZTable(GVTable->getInitializer(), *MulConst, *ShiftConst,
+ AndCst ? *AndCst : APInt::getAllOnes(InputBits), AccessType,
+ InputBits, GEPScale.zextOrTrunc(InputBits), DL))
return false;
- auto ZeroTableElem = ConstData->getElementAsInteger(0);
- bool DefinedForZero = ZeroTableElem == InputBits;
+ ConstantInt *ZeroTableElem = cast<ConstantInt>(
+ ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));
+ bool DefinedForZero = ZeroTableElem->getZExtValue() == InputBits;
IRBuilder<> B(LI);
ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
@@ -607,8 +596,7 @@ static bool tryToRecognizeTableBasedCttz(Instruction &I) {
// If the value in elem 0 isn't the same as InputBits, we still want to
// produce the value from the table.
auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
- auto Select =
- B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz);
+ auto Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Cttz);
// NOTE: If table[0] is 0 but cttz(0) is defined by the target, it should be
// handled as: `cttz(x) & (typeSize - 1)`.
@@ -1477,7 +1465,7 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
MadeChange |= foldGuardedFunnelShift(I, DT);
MadeChange |= tryToRecognizePopCount(I);
MadeChange |= tryToFPToSat(I, TTI);
- MadeChange |= tryToRecognizeTableBasedCttz(I);
+ MadeChange |= tryToRecognizeTableBasedCttz(I, DL);
MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
MadeChange |= foldPatternedLoads(I, DL);
MadeChange |= foldICmpOrChain(I, DL, TTI, AA, DT);
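For reference, this is the classic source pattern the combine now recognizes for the supported widths, shown here for 32 bits with the well-known de Bruijn constant 0x077CB531 from Bit Twiddling Hacks (a standalone sketch, not taken from any particular test case):

    #include <cstdint>

    // cttz via a de Bruijn multiply: (x & -x) isolates the lowest set bit, the
    // multiply places a unique 5-bit pattern in the top bits, and the table
    // maps that pattern back to the bit index. Undefined for X == 0.
    static int tableCttz32(uint32_t X) {
      static const int Table[32] = {
          0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
          31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
      return Table[((X & -X) * 0x077CB531u) >> 27];
    }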
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 64b33e4..ab906f9 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -1568,7 +1568,7 @@ private:
if (DebugLoc SuspendLoc = S->getDebugLoc()) {
std::string LabelName =
("__coro_resume_" + Twine(SuspendIndex)).str();
- DILocation &DILoc = *SuspendLoc.get();
+ DILocation &DILoc = *SuspendLoc;
DILabel *ResumeLabel =
DBuilder.createLabel(DIS, LabelName, DILoc.getFile(),
SuspendLoc.getLine(), SuspendLoc.getCol(),
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 486205c..57844a1 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -502,8 +502,7 @@ class LowerTypeTestsModule {
uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
TypeIdLowering importTypeId(StringRef TypeId);
void importTypeTest(CallInst *CI);
- void importFunction(Function *F, bool isJumpTableCanonical,
- std::vector<GlobalAlias *> &AliasesToErase);
+ void importFunction(Function *F, bool isJumpTableCanonical);
BitSetInfo
buildBitSet(Metadata *TypeId,
@@ -1103,9 +1102,8 @@ void LowerTypeTestsModule::maybeReplaceComdat(Function *F,
// ThinLTO backend: the function F has a jump table entry; update this module
// accordingly. isJumpTableCanonical describes the type of the jump table entry.
-void LowerTypeTestsModule::importFunction(
- Function *F, bool isJumpTableCanonical,
- std::vector<GlobalAlias *> &AliasesToErase) {
+void LowerTypeTestsModule::importFunction(Function *F,
+ bool isJumpTableCanonical) {
assert(F->getType()->getAddressSpace() == 0);
GlobalValue::VisibilityTypes Visibility = F->getVisibility();
@@ -1135,23 +1133,23 @@ void LowerTypeTestsModule::importFunction(
} else {
F->setName(Name + ".cfi");
maybeReplaceComdat(F, Name);
- F->setLinkage(GlobalValue::ExternalLinkage);
FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
F->getAddressSpace(), Name, &M);
FDecl->setVisibility(Visibility);
Visibility = GlobalValue::HiddenVisibility;
- // Delete aliases pointing to this function, they'll be re-created in the
- // merged output. Don't do it yet though because ScopedSaveAliaseesAndUsed
- // will want to reset the aliasees first.
+ // Update aliases pointing to this function to also include the ".cfi" suffix.
+ // We expect the jump table entry to point to either the real function or an
+ // alias. Redirect all other users to the jump table entry.
for (auto &U : F->uses()) {
if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
+ std::string AliasName = A->getName().str() + ".cfi";
Function *AliasDecl = Function::Create(
F->getFunctionType(), GlobalValue::ExternalLinkage,
F->getAddressSpace(), "", &M);
AliasDecl->takeName(A);
A->replaceAllUsesWith(AliasDecl);
- AliasesToErase.push_back(A);
+ A->setName(AliasName);
}
}
}
@@ -2077,16 +2075,13 @@ bool LowerTypeTestsModule::lower() {
Decls.push_back(&F);
}
- std::vector<GlobalAlias *> AliasesToErase;
{
ScopedSaveAliaseesAndUsed S(M);
for (auto *F : Defs)
- importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
+ importFunction(F, /*isJumpTableCanonical*/ true);
for (auto *F : Decls)
- importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
+ importFunction(F, /*isJumpTableCanonical*/ false);
}
- for (GlobalAlias *GA : AliasesToErase)
- GA->eraseFromParent();
return true;
}
@@ -2137,6 +2132,18 @@ bool LowerTypeTestsModule::lower() {
if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
AddressTaken.insert(Alias->getAliaseeGUID());
}
+ auto IsAddressTaken = [&](GlobalValue::GUID GUID) {
+ if (AddressTaken.count(GUID))
+ return true;
+ auto VI = ExportSummary->getValueInfo(GUID);
+ if (!VI)
+ return false;
+ for (auto &I : VI.getSummaryList())
+ if (auto Alias = dyn_cast<AliasSummary>(I.get()))
+ if (AddressTaken.count(Alias->getAliaseeGUID()))
+ return true;
+ return false;
+ };
for (auto *FuncMD : CfiFunctionsMD->operands()) {
assert(FuncMD->getNumOperands() >= 2);
StringRef FunctionName =
@@ -2153,7 +2160,7 @@ bool LowerTypeTestsModule::lower() {
// have no live references (and are not exported with cross-DSO CFI.)
if (!ExportSummary->isGUIDLive(GUID))
continue;
- if (!AddressTaken.count(GUID)) {
+ if (!IsAddressTaken(GUID)) {
if (!CrossDsoCfi || Linkage != CFL_Definition)
continue;
@@ -2227,6 +2234,43 @@ bool LowerTypeTestsModule::lower() {
}
}
+ struct AliasToCreate {
+ Function *Alias;
+ std::string TargetName;
+ };
+ std::vector<AliasToCreate> AliasesToCreate;
+
+ // Parse alias data to replace stand-in function declarations for aliases
+ // with an alias to the intended target.
+ if (ExportSummary) {
+ if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
+ for (auto *AliasMD : AliasesMD->operands()) {
+ SmallVector<Function *> Aliases;
+ for (Metadata *MD : AliasMD->operands()) {
+ auto *MDS = dyn_cast<MDString>(MD);
+ if (!MDS)
+ continue;
+ StringRef AliasName = MDS->getString();
+ if (!ExportedFunctions.count(AliasName))
+ continue;
+ auto *AliasF = M.getFunction(AliasName);
+ if (AliasF)
+ Aliases.push_back(AliasF);
+ }
+
+ if (Aliases.empty())
+ continue;
+
+ for (unsigned I = 1; I != Aliases.size(); ++I) {
+ auto *AliasF = Aliases[I];
+ ExportedFunctions.erase(AliasF->getName());
+ AliasesToCreate.push_back(
+ {AliasF, std::string(Aliases[0]->getName())});
+ }
+ }
+ }
+ }
+
DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
for (GlobalObject &GO : M.global_objects()) {
if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
@@ -2414,47 +2458,16 @@ bool LowerTypeTestsModule::lower() {
allocateByteArrays();
- // Parse alias data to replace stand-in function declarations for aliases
- // with an alias to the intended target.
- if (ExportSummary) {
- if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
- for (auto *AliasMD : AliasesMD->operands()) {
- assert(AliasMD->getNumOperands() >= 4);
- StringRef AliasName =
- cast<MDString>(AliasMD->getOperand(0))->getString();
- StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
-
- if (auto It = ExportedFunctions.find(Aliasee);
- It == ExportedFunctions.end() ||
- It->second.Linkage != CFL_Definition || !M.getNamedAlias(Aliasee))
- continue;
-
- GlobalValue::VisibilityTypes Visibility =
- static_cast<GlobalValue::VisibilityTypes>(
- cast<ConstantAsMetadata>(AliasMD->getOperand(2))
- ->getValue()
- ->getUniqueInteger()
- .getZExtValue());
- bool Weak =
- static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
- ->getValue()
- ->getUniqueInteger()
- .getZExtValue());
-
- auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
- Alias->setVisibility(Visibility);
- if (Weak)
- Alias->setLinkage(GlobalValue::WeakAnyLinkage);
-
- if (auto *F = M.getFunction(AliasName)) {
- Alias->takeName(F);
- F->replaceAllUsesWith(Alias);
- F->eraseFromParent();
- } else {
- Alias->setName(AliasName);
- }
- }
- }
+ for (auto A : AliasesToCreate) {
+ auto *Target = M.getNamedValue(A.TargetName);
+ if (!isa<GlobalAlias>(Target))
+ continue;
+ auto *AliasGA = GlobalAlias::create("", Target);
+ AliasGA->setVisibility(A.Alias->getVisibility());
+ AliasGA->setLinkage(A.Alias->getLinkage());
+ AliasGA->takeName(A.Alias);
+ A.Alias->replaceAllUsesWith(AliasGA);
+ A.Alias->eraseFromParent();
}
// Emit .symver directives for exported functions, if they exist.
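The stand-in declarations recorded above are resolved by creating a real alias over the jump-table-canonical target, taking over the declaration's name and users. A hypothetical standalone helper showing that replacement pattern with the plain IR API (summary updates and error handling omitted):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/GlobalAlias.h"

    // Replace the stand-in declaration F with an alias to Target, preserving
    // F's name, visibility and linkage.
    static void replaceDeclWithAlias(llvm::Function *F, llvm::GlobalValue *Target) {
      using namespace llvm;
      GlobalAlias *GA = GlobalAlias::create("", Target);
      GA->setVisibility(F->getVisibility());
      GA->setLinkage(F->getLinkage());
      GA->takeName(F);               // the alias assumes the original name
      F->replaceAllUsesWith(GA);     // redirect every user to the alias
      F->eraseFromParent();
    }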
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 2b392fe..c009c1e 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -732,7 +732,7 @@ private:
/// of the functions tracked calls to their new versions in the CallMap.
/// Assigns new clones to clone number CloneNo.
FuncInfo cloneFunctionForCallsite(
- FuncInfo &Func, CallInfo &Call, std::map<CallInfo, CallInfo> &CallMap,
+ FuncInfo &Func, CallInfo &Call, DenseMap<CallInfo, CallInfo> &CallMap,
std::vector<CallInfo> &CallsWithMetadataInFunc, unsigned CloneNo) {
return static_cast<DerivedCCG *>(this)->cloneFunctionForCallsite(
Func, Call, CallMap, CallsWithMetadataInFunc, CloneNo);
@@ -899,7 +899,7 @@ private:
CallsiteContextGraph<ModuleCallsiteContextGraph, Function,
Instruction *>::FuncInfo
cloneFunctionForCallsite(FuncInfo &Func, CallInfo &Call,
- std::map<CallInfo, CallInfo> &CallMap,
+ DenseMap<CallInfo, CallInfo> &CallMap,
std::vector<CallInfo> &CallsWithMetadataInFunc,
unsigned CloneNo);
std::string getLabel(const Function *Func, const Instruction *Call,
@@ -991,7 +991,7 @@ private:
CallsiteContextGraph<IndexCallsiteContextGraph, FunctionSummary,
IndexCall>::FuncInfo
cloneFunctionForCallsite(FuncInfo &Func, CallInfo &Call,
- std::map<CallInfo, CallInfo> &CallMap,
+ DenseMap<CallInfo, CallInfo> &CallMap,
std::vector<CallInfo> &CallsWithMetadataInFunc,
unsigned CloneNo);
std::string getLabel(const FunctionSummary *Func, const IndexCall &Call,
@@ -4062,7 +4062,7 @@ static void updateSubprogramLinkageName(Function *NewFunc, StringRef Name) {
CallsiteContextGraph<ModuleCallsiteContextGraph, Function,
Instruction *>::FuncInfo
ModuleCallsiteContextGraph::cloneFunctionForCallsite(
- FuncInfo &Func, CallInfo &Call, std::map<CallInfo, CallInfo> &CallMap,
+ FuncInfo &Func, CallInfo &Call, DenseMap<CallInfo, CallInfo> &CallMap,
std::vector<CallInfo> &CallsWithMetadataInFunc, unsigned CloneNo) {
// Use existing LLVM facilities for cloning and obtaining Call in clone
ValueToValueMapTy VMap;
@@ -4085,7 +4085,7 @@ ModuleCallsiteContextGraph::cloneFunctionForCallsite(
CallsiteContextGraph<IndexCallsiteContextGraph, FunctionSummary,
IndexCall>::FuncInfo
IndexCallsiteContextGraph::cloneFunctionForCallsite(
- FuncInfo &Func, CallInfo &Call, std::map<CallInfo, CallInfo> &CallMap,
+ FuncInfo &Func, CallInfo &Call, DenseMap<CallInfo, CallInfo> &CallMap,
std::vector<CallInfo> &CallsWithMetadataInFunc, unsigned CloneNo) {
// Check how many clones we have of Call (and therefore function).
// The next clone number is the current size of versions array.
@@ -4506,7 +4506,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
FuncInfo FuncClone;
// Remappings of each call of interest (from original uncloned call to the
// corresponding cloned call in this function clone).
- std::map<CallInfo, CallInfo> CallMap;
+ DenseMap<CallInfo, CallInfo> CallMap;
};
// Walk all functions for which we saw calls with memprof metadata, and handle
@@ -4542,7 +4542,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
FuncCloneToCurNodeCloneMap[FuncClone] = CallsiteClone;
assert(FuncCloneInfos.size() > FuncClone.cloneNo());
- std::map<CallInfo, CallInfo> &CallMap =
+ DenseMap<CallInfo, CallInfo> &CallMap =
FuncCloneInfos[FuncClone.cloneNo()].CallMap;
CallInfo CallClone(Call);
if (auto It = CallMap.find(Call); It != CallMap.end())
@@ -4594,7 +4594,8 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
}));
// Initialize with empty call map, assign Clone to original function
// and its callers, and skip to the next clone.
- FuncCloneInfos.push_back({OrigFunc, {}});
+ FuncCloneInfos.push_back(
+ {OrigFunc, DenseMap<CallInfo, CallInfo>()});
AssignCallsiteCloneToFuncClone(
OrigFunc, Call, Clone,
AllocationCallToContextNodeMap.count(Call));
@@ -4627,7 +4628,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
// Clone function and save it along with the CallInfo map created
// during cloning in the FuncCloneInfos.
- std::map<CallInfo, CallInfo> NewCallMap;
+ DenseMap<CallInfo, CallInfo> NewCallMap;
unsigned CloneNo = FuncCloneInfos.size();
assert(CloneNo > 0 && "Clone 0 is the original function, which "
"should already exist in the map");
@@ -4734,7 +4735,7 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
// CallMap is set up as indexed by original Call at clone 0.
CallInfo OrigCall(Callee->getOrigNode()->Call);
OrigCall.setCloneNo(0);
- std::map<CallInfo, CallInfo> &CallMap =
+ DenseMap<CallInfo, CallInfo> &CallMap =
FuncCloneInfos[NewFuncClone.cloneNo()].CallMap;
assert(CallMap.count(OrigCall));
CallInfo NewCall(CallMap[OrigCall]);
diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index e276376..4387c38 100644
--- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -384,6 +384,10 @@ void splitAndWriteThinLTOBitcode(
for (auto &F : M)
if ((!F.hasLocalLinkage() || F.hasAddressTaken()) && HasTypeMetadata(&F))
CfiFunctions.insert(&F);
+ for (auto &A : M.aliases())
+ if (auto *F = dyn_cast<Function>(A.getAliasee()))
+ if (HasTypeMetadata(F))
+ CfiFunctions.insert(&A);
// Remove all globals with type metadata, globals with comdats that live in
// MergedM, and aliases pointing to such globals from the thin LTO module.
@@ -403,12 +407,12 @@ void splitAndWriteThinLTOBitcode(
auto &Ctx = MergedM->getContext();
SmallVector<MDNode *, 8> CfiFunctionMDs;
for (auto *V : CfiFunctions) {
- Function &F = *cast<Function>(V);
+ Function &F = *cast<Function>(V->getAliaseeObject());
SmallVector<MDNode *, 2> Types;
F.getMetadata(LLVMContext::MD_type, Types);
SmallVector<Metadata *, 4> Elts;
- Elts.push_back(MDString::get(Ctx, F.getName()));
+ Elts.push_back(MDString::get(Ctx, V->getName()));
CfiFunctionLinkage Linkage;
if (lowertypetests::isJumpTableCanonical(&F))
Linkage = CFL_Definition;
@@ -428,29 +432,24 @@ void splitAndWriteThinLTOBitcode(
NMD->addOperand(MD);
}
- SmallVector<MDNode *, 8> FunctionAliases;
+ MapVector<Function *, std::vector<GlobalAlias *>> FunctionAliases;
for (auto &A : M.aliases()) {
if (!isa<Function>(A.getAliasee()))
continue;
auto *F = cast<Function>(A.getAliasee());
-
- Metadata *Elts[] = {
- MDString::get(Ctx, A.getName()),
- MDString::get(Ctx, F->getName()),
- ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt8Ty(Ctx), A.getVisibility())),
- ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt8Ty(Ctx), A.isWeakForLinker())),
- };
-
- FunctionAliases.push_back(MDTuple::get(Ctx, Elts));
+ FunctionAliases[F].push_back(&A);
}
if (!FunctionAliases.empty()) {
NamedMDNode *NMD = MergedM->getOrInsertNamedMetadata("aliases");
- for (auto *MD : FunctionAliases)
- NMD->addOperand(MD);
+ for (auto &Alias : FunctionAliases) {
+ SmallVector<Metadata *> Elts;
+ Elts.push_back(MDString::get(Ctx, Alias.first->getName()));
+ for (auto *A : Alias.second)
+ Elts.push_back(MDString::get(Ctx, A->getName()));
+ NMD->addOperand(MDTuple::get(Ctx, Elts));
+ }
}
SmallVector<MDNode *, 8> Symvers;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b231c04..d7971e8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -11,10 +11,13 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/FloatingPointPredicateUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
@@ -3589,6 +3592,154 @@ static Value *foldOrOfInversions(BinaryOperator &I,
return nullptr;
}
+/// Match \p V as "shufflevector -> bitcast" or "extractelement -> zext -> shl"
+/// patterns, which extract vector elements and pack them in the same relative
+/// positions.
+///
+/// \p Vec is the underlying vector being extracted from.
+/// \p Mask is a bitmask identifying which packed elements are obtained from the
+/// vector.
+/// \p VecOffset is the vector element corresponding to index 0 of the
+/// mask.
+static bool matchSubIntegerPackFromVector(Value *V, Value *&Vec,
+ int64_t &VecOffset,
+ SmallBitVector &Mask,
+ const DataLayout &DL) {
+ static const auto m_ConstShlOrSelf = [](const auto &Base, uint64_t &ShlAmt) {
+ ShlAmt = 0;
+ return m_CombineOr(m_Shl(Base, m_ConstantInt(ShlAmt)), Base);
+ };
+
+ // First try to match extractelement -> zext -> shl
+ uint64_t VecIdx, ShlAmt;
+ if (match(V, m_ConstShlOrSelf(m_ZExtOrSelf(m_ExtractElt(
+ m_Value(Vec), m_ConstantInt(VecIdx))),
+ ShlAmt))) {
+ auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
+ if (!VecTy)
+ return false;
+ auto *EltTy = dyn_cast<IntegerType>(VecTy->getElementType());
+ if (!EltTy)
+ return false;
+
+ const unsigned EltBitWidth = EltTy->getBitWidth();
+ const unsigned TargetBitWidth = V->getType()->getIntegerBitWidth();
+ if (TargetBitWidth % EltBitWidth != 0 || ShlAmt % EltBitWidth != 0)
+ return false;
+ const unsigned TargetEltWidth = TargetBitWidth / EltBitWidth;
+ const unsigned ShlEltAmt = ShlAmt / EltBitWidth;
+
+ const unsigned MaskIdx =
+ DL.isLittleEndian() ? ShlEltAmt : TargetEltWidth - ShlEltAmt - 1;
+
+ VecOffset = static_cast<int64_t>(VecIdx) - static_cast<int64_t>(MaskIdx);
+ Mask.resize(TargetEltWidth);
+ Mask.set(MaskIdx);
+ return true;
+ }
+
+ // Now try to match a bitcasted subvector.
+ Instruction *SrcVecI;
+ if (!match(V, m_BitCast(m_Instruction(SrcVecI))))
+ return false;
+
+ auto *SrcTy = dyn_cast<FixedVectorType>(SrcVecI->getType());
+ if (!SrcTy)
+ return false;
+
+ Mask.resize(SrcTy->getNumElements());
+
+ // First check for a subvector obtained from a shufflevector.
+ if (isa<ShuffleVectorInst>(SrcVecI)) {
+ Constant *ConstVec;
+ ArrayRef<int> ShuffleMask;
+ if (!match(SrcVecI, m_Shuffle(m_Value(Vec), m_Constant(ConstVec),
+ m_Mask(ShuffleMask))))
+ return false;
+
+ auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
+ if (!VecTy)
+ return false;
+
+ const unsigned NumVecElts = VecTy->getNumElements();
+ bool FoundVecOffset = false;
+ for (unsigned Idx = 0; Idx < ShuffleMask.size(); ++Idx) {
+ if (ShuffleMask[Idx] == PoisonMaskElem)
+ return false;
+ const unsigned ShuffleIdx = ShuffleMask[Idx];
+ if (ShuffleIdx >= NumVecElts) {
+ const unsigned ConstIdx = ShuffleIdx - NumVecElts;
+ auto *ConstElt =
+ dyn_cast<ConstantInt>(ConstVec->getAggregateElement(ConstIdx));
+ if (!ConstElt || !ConstElt->isNullValue())
+ return false;
+ continue;
+ }
+
+ if (FoundVecOffset) {
+ if (VecOffset + Idx != ShuffleIdx)
+ return false;
+ } else {
+ if (ShuffleIdx < Idx)
+ return false;
+ VecOffset = ShuffleIdx - Idx;
+ FoundVecOffset = true;
+ }
+ Mask.set(Idx);
+ }
+ return FoundVecOffset;
+ }
+
+ // Check for a subvector obtained as an (insertelement V, 0, idx)
+ uint64_t InsertIdx;
+ if (!match(SrcVecI,
+ m_InsertElt(m_Value(Vec), m_Zero(), m_ConstantInt(InsertIdx))))
+ return false;
+
+ auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
+ if (!VecTy)
+ return false;
+ VecOffset = 0;
+ bool AlreadyInsertedMaskedElt = Mask.test(InsertIdx);
+ Mask.set();
+ if (!AlreadyInsertedMaskedElt)
+ Mask.reset(InsertIdx);
+ return true;
+}
+
+/// Try to fold the join of two scalar integers whose contents are packed
+/// elements of the same vector.
+static Instruction *foldIntegerPackFromVector(Instruction &I,
+ InstCombiner::BuilderTy &Builder,
+ const DataLayout &DL) {
+ assert(I.getOpcode() == Instruction::Or);
+ Value *LhsVec, *RhsVec;
+ int64_t LhsVecOffset, RhsVecOffset;
+ SmallBitVector Mask;
+ if (!matchSubIntegerPackFromVector(I.getOperand(0), LhsVec, LhsVecOffset,
+ Mask, DL))
+ return nullptr;
+ if (!matchSubIntegerPackFromVector(I.getOperand(1), RhsVec, RhsVecOffset,
+ Mask, DL))
+ return nullptr;
+ if (LhsVec != RhsVec || LhsVecOffset != RhsVecOffset)
+ return nullptr;
+
+ // Convert into shufflevector -> bitcast;
+ const unsigned ZeroVecIdx =
+ cast<FixedVectorType>(LhsVec->getType())->getNumElements();
+ SmallVector<int> ShuffleMask(Mask.size(), ZeroVecIdx);
+ for (unsigned Idx : Mask.set_bits()) {
+ assert(LhsVecOffset + Idx >= 0);
+ ShuffleMask[Idx] = LhsVecOffset + Idx;
+ }
+
+ Value *MaskedVec = Builder.CreateShuffleVector(
+ LhsVec, Constant::getNullValue(LhsVec->getType()), ShuffleMask,
+ I.getName() + ".v");
+ return CastInst::Create(Instruction::BitCast, MaskedVec, I.getType());
+}
+
// A decomposition of ((X & Mask) * Factor). The NUW / NSW bools
// track these properties for preservation. Note that we can decompose
// equivalent select form of this expression (e.g. (!(X & Mask) ? 0 : Mask *
@@ -3766,6 +3917,9 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
return X;
+ if (Instruction *X = foldIntegerPackFromVector(I, Builder, DL))
+ return X;
+
// (A & B) | (C & D) -> A ^ D where A == ~C && B == ~D
// (A & B) | (C & D) -> A ^ C where A == ~D && B == ~C
if (Value *V = foldOrOfInversions(I, Builder))
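The new fold recognizes scalar code that reassembles adjacent vector lanes with zext/shl/or (or a bitcast of a shuffled subvector) and rewrites it as a single shufflevector plus bitcast. In C++ terms, and assuming a little-endian lane layout, the two sides it proves equal look roughly like this (illustrative only):

    #include <cstdint>
    #include <cstring>

    // Packing the first two i16 lanes with zext+shl+or produces the same 32-bit
    // value as keeping those lanes and bitcasting them. Big-endian flips which
    // lane lands in the low half, which is why the matcher consults
    // DL.isLittleEndian().
    uint32_t packByScalarOps(const uint16_t Lanes[8]) {
      return uint32_t(Lanes[0]) | (uint32_t(Lanes[1]) << 16);
    }
    uint32_t packByBitcast(const uint16_t Lanes[8]) {
      uint32_t Out;
      std::memcpy(&Out, Lanes, sizeof(Out)); // "keep lanes 0..1, then bitcast"
      return Out;
    }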
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 1b78ace..47e017e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3891,16 +3891,20 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
- // Try to fold intrinsic into select operands. This is legal if:
+ // Try to fold intrinsic into select/phi operands. This is legal if:
// * The intrinsic is speculatable.
// * The select condition is not a vector, or the intrinsic does not
// perform cross-lane operations.
if (isSafeToSpeculativelyExecuteWithVariableReplaced(&CI) &&
isNotCrossLaneOperation(II))
- for (Value *Op : II->args())
+ for (Value *Op : II->args()) {
if (auto *Sel = dyn_cast<SelectInst>(Op))
if (Instruction *R = FoldOpIntoSelect(*II, Sel))
return R;
+ if (auto *Phi = dyn_cast<PHINode>(Op))
+ if (Instruction *R = foldOpIntoPhi(*II, Phi))
+ return R;
+ }
if (Instruction *Shuf = foldShuffledIntrinsicOperands(II))
return Shuf;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index da9b126..b268fea 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -163,6 +163,11 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
LaterIndices.push_back(IdxVal);
}
+ Value *Idx = GEP->getOperand(2);
+ // If the index type is non-canonical, wait for it to be canonicalized.
+ if (Idx->getType() != DL.getIndexType(GEP->getType()))
+ return nullptr;
+
enum { Overdefined = -3, Undefined = -2 };
// Variables for our state machines.
@@ -290,17 +295,6 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
// Now that we've scanned the entire array, emit our new comparison(s). We
// order the state machines in complexity of the generated code.
- Value *Idx = GEP->getOperand(2);
-
- // If the index is larger than the pointer offset size of the target, truncate
- // the index down like the GEP would do implicitly. We don't have to do this
- // for an inbounds GEP because the index can't be out of range.
- if (!GEP->isInBounds()) {
- Type *PtrIdxTy = DL.getIndexType(GEP->getType());
- unsigned OffsetSize = PtrIdxTy->getIntegerBitWidth();
- if (Idx->getType()->getPrimitiveSizeInBits().getFixedValue() > OffsetSize)
- Idx = Builder.CreateTrunc(Idx, PtrIdxTy);
- }
// If inbounds keyword is not present, Idx * ElementSize can overflow.
// Let's assume that ElementSize is 2 and the wanted value is at offset 0.
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index e2a9255..56358b1 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1994,6 +1994,8 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
}
Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
Clones.insert({OpBB, Clone});
+ // We may have speculated the instruction.
+ Clone->dropUBImplyingAttrsAndMetadata();
}
NewPhiValues[OpIndex] = Clone;
@@ -2777,6 +2779,12 @@ Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
Indices.append(GEP.idx_begin()+1, GEP.idx_end());
}
+ // Don't create GEPs with more than one variable index.
+ unsigned NumVarIndices =
+ count_if(Indices, [](Value *Idx) { return !isa<Constant>(Idx); });
+ if (NumVarIndices > 1)
+ return nullptr;
+
if (!Indices.empty())
return replaceInstUsesWith(
GEP, Builder.CreateGEP(
@@ -3176,7 +3184,16 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
- *I = Builder.CreateIntCast(*I, NewIndexType, true);
+ if (IndexTy->getScalarSizeInBits() <
+ NewIndexType->getScalarSizeInBits()) {
+ if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
+ *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
+ else
+ *I = Builder.CreateSExt(*I, NewIndexType);
+ } else {
+ *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
+ GEP.hasNoUnsignedSignedWrap());
+ }
MadeChange = true;
}
}
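(Illustrative aside, not part of the patch: a minimal standalone C++ model of the index-width rule introduced above. The enum and helper names are invented for the sketch; the real code uses IRBuilder's CreateZExt/CreateSExt/CreateTrunc as shown in the hunk.)

#include <cassert>
#include <cstdio>

// Sketch of the rule above: pick the cast that brings a GEP index to the
// target's index width.
enum class IndexCast { None, ZExtNNeg, SExt, Trunc };

static IndexCast pickIndexCast(unsigned IdxBits, unsigned TargetBits,
                               bool HasNUW, bool HasNUSW) {
  if (IdxBits == TargetBits)
    return IndexCast::None;
  if (IdxBits < TargetBits)
    // nuw + nusw together imply the index is non-negative, so a zero-extend
    // (marked nneg) preserves the value; otherwise sign-extend, matching the
    // GEP's implicit behaviour.
    return (HasNUW && HasNUSW) ? IndexCast::ZExtNNeg : IndexCast::SExt;
  // Wider than needed: truncate, carrying the wrap flags over.
  return IndexCast::Trunc;
}

int main() {
  assert(pickIndexCast(32, 64, true, true) == IndexCast::ZExtNNeg);
  assert(pickIndexCast(32, 64, false, true) == IndexCast::SExt);
  assert(pickIndexCast(128, 64, false, false) == IndexCast::Trunc);
  std::puts("index cast model ok");
  return 0;
}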
@@ -3199,6 +3216,14 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return replaceInstUsesWith(GEP, NewGEP);
}
+ // Strip trailing zero indices.
+ auto *LastIdx = dyn_cast<Constant>(Indices.back());
+ if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
+ return replaceInstUsesWith(
+ GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
+ drop_end(Indices), "", GEP.getNoWrapFlags()));
+ }
+
// Scalarize vector operands; prefer splat-of-gep as canonical form.
// Note that this loses information about undef lanes; we run it after
// demanded bits to partially mitigate that loss.
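(Aside: the strip-trailing-zero-index fold added above relies on a trailing zero index not changing the computed address; a tiny standalone C++ check of that fact, using an ordinary array rather than LLVM IR.)

#include <cassert>

int main() {
  int Arr[3][4] = {};
  // "gep T, p, 2, 0" and "gep T, p, 2" compute the same address: a trailing
  // zero index only selects the first element at that address.
  assert(static_cast<void *>(&Arr[2][0]) == static_cast<void *>(&Arr[2]));
  return 0;
}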
@@ -3225,6 +3250,30 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return replaceInstUsesWith(GEP, Res);
}
+ bool SeenVarIndex = false;
+ for (auto [IdxNum, Idx] : enumerate(Indices)) {
+ if (isa<Constant>(Idx))
+ continue;
+
+ if (!SeenVarIndex) {
+ SeenVarIndex = true;
+ continue;
+ }
+
+ // GEP has multiple variable indices: Split it.
+ ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
+ Value *FrontGEP =
+ Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
+ GEP.getName() + ".split", GEP.getNoWrapFlags());
+
+ SmallVector<Value *> BackIndices;
+ BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
+ append_range(BackIndices, drop_begin(Indices, IdxNum));
+ return GetElementPtrInst::Create(
+ GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
+ BackIndices, GEP.getNoWrapFlags());
+ }
+
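(Aside: a standalone C++ analogue of the split performed above. Splitting a GEP with two variable indices corresponds to first forming the row address and then indexing off it with a leading zero index, which leaves the final address unchanged; the array here is only an example.)

#include <cassert>

int main() {
  int A[5][7] = {};
  unsigned I = 3, J = 2; // two variable indices
  // Front GEP: address of row I. Back GEP: indices {0, J} off the front GEP.
  int(*Front)[7] = &A[I];
  assert(&A[I][J] == &Front[0][J]);
  return 0;
}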
// Check to see if the inputs to the PHI node are getelementptr instructions.
if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 4e5a8d1..bcb90d6 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -160,6 +160,16 @@ static cl::opt<bool> ClGenerateTagsWithCalls(
static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
cl::Hidden, cl::init(false));
+static cl::opt<bool> ClAllGlobals(
+ "hwasan-all-globals",
+ cl::desc(
+ "Instrument globals, even those within user-defined sections. Warning: "
+ "This may break existing code which walks globals via linker-generated "
+ "symbols, expects certain globals to be contiguous with each other, or "
+ "makes other assumptions which are invalidated by HWASan "
+ "instrumentation."),
+ cl::Hidden, cl::init(false));
+
static cl::opt<int> ClMatchAllTag(
"hwasan-match-all-tag",
cl::desc("don't report bad accesses via pointers with this tag"),
@@ -681,11 +691,11 @@ void HWAddressSanitizer::initializeModule() {
!CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
if (!CompileKernel) {
- createHwasanCtorComdat();
-
if (InstrumentGlobals)
instrumentGlobals();
+ createHwasanCtorComdat();
+
bool InstrumentPersonalityFunctions =
optOr(ClInstrumentPersonalityFunctions, NewRuntime);
if (InstrumentPersonalityFunctions)
@@ -1772,11 +1782,17 @@ void HWAddressSanitizer::instrumentGlobals() {
if (GV.hasCommonLinkage())
continue;
- // Globals with custom sections may be used in __start_/__stop_ enumeration,
- // which would be broken both by adding tags and potentially by the extra
- // padding/alignment that we insert.
- if (GV.hasSection())
- continue;
+ if (ClAllGlobals) {
+ // Avoid instrumenting intrinsic global variables.
+ if (GV.getSection() == "llvm.metadata")
+ continue;
+ } else {
+ // Globals with custom sections may be used in __start_/__stop_
+ // enumeration, which would be broken both by adding tags and potentially
+ // by the extra padding/alignment that we insert.
+ if (GV.hasSection())
+ continue;
+ }
Globals.push_back(&GV);
}
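(Aside: the kind of code the new flag's warning refers to. This is a minimal Linux/ELF sketch of the linker-generated __start_/__stop_ section-walk pattern, assuming a GNU-style linker; tagging these globals or padding between them would break the walk.)

#include <cstdio>

// Two globals placed in a user-defined section named by a C identifier.
__attribute__((section("counters"), used)) static int CounterA = 1;
__attribute__((section("counters"), used)) static int CounterB = 2;

// The linker synthesizes these bounds for such sections.
extern "C" int __start_counters[];
extern "C" int __stop_counters[];

int main() {
  int Sum = 0;
  // Walk the section as a dense, untagged array of ints. HWASan tagging or
  // extra padding between the globals invalidates this assumption.
  for (int *P = __start_counters; P != __stop_counters; ++P)
    Sum += *P;
  std::printf("sum = %d\n", Sum);
  return 0;
}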
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index df31f07..54d9a83 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4769,6 +4769,79 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOriginForNaryOp(I);
}
+ // Approximately handle AVX Galois Field Affine Transformation
+ //
+ // e.g.,
+ // <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
+ // <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
+ // <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
+ // Out A x b
+ // where A and x are packed matrices, b is a vector,
+ // Out = A * x + b in GF(2)
+ //
+ // Multiplication in GF(2) is equivalent to bitwise AND. However, the matrix
+ // computation also includes a parity calculation.
+ //
+ // For the bitwise AND of bits V1 and V2, the exact shadow is:
+ // Out_Shadow = (V1_Shadow & V2_Shadow)
+ // | (V1 & V2_Shadow)
+ // | (V1_Shadow & V2 )
+ //
+ // We approximate the shadow of gf2p8affineqb using:
+ // Out_Shadow = gf2p8affineqb(x_Shadow, A_shadow, 0)
+ // | gf2p8affineqb(x, A_shadow, 0)
+ // | gf2p8affineqb(x_Shadow, A, 0)
+ // | set1_epi8(b_Shadow)
+ //
+  // This approximation has false negatives: if an intermediate dot product in
+  // the shadow computation contains an even number of poisoned bits, their
+  // parity is 0 and the poison is lost.
+  // It has no false positives.
+ void handleAVXGF2P8Affine(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+
+ assert(I.arg_size() == 3);
+ Value *A = I.getOperand(0);
+ Value *X = I.getOperand(1);
+ Value *B = I.getOperand(2);
+
+ assert(isFixedIntVector(A));
+ assert(cast<VectorType>(A->getType())
+ ->getElementType()
+ ->getScalarSizeInBits() == 8);
+
+ assert(A->getType() == X->getType());
+
+ assert(B->getType()->isIntegerTy());
+ assert(B->getType()->getScalarSizeInBits() == 8);
+
+ assert(I.getType() == A->getType());
+
+ Value *AShadow = getShadow(A);
+ Value *XShadow = getShadow(X);
+ Value *BZeroShadow = getCleanShadow(B);
+
+ CallInst *AShadowXShadow = IRB.CreateIntrinsic(
+ I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
+ CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
+ {X, AShadow, BZeroShadow});
+ CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
+ {XShadow, A, BZeroShadow});
+
+ unsigned NumElements = cast<FixedVectorType>(I.getType())->getNumElements();
+ Value *BShadow = getShadow(B);
+ Value *BBroadcastShadow = getCleanShadow(AShadow);
+ // There is no LLVM IR intrinsic for _mm512_set1_epi8.
+  // This loop generates a lot of LLVM IR, which we expect CodeGen to lower
+  // appropriately (e.g., VPBROADCASTB).
+ // Besides, b is often a constant, in which case it is fully initialized.
+ for (unsigned i = 0; i < NumElements; i++)
+ BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);
+
+ setShadow(&I, IRB.CreateOr(
+ {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
+ setOriginForNaryOp(I);
+ }
+
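(Aside: a one-bit standalone C++ model of the shadow rule documented above, with the b operand omitted. It reproduces the false-negative case: two poisoned bits feeding one GF(2) dot product cancel in the parity, so the approximated shadow is clean while the exact rule reports poison.)

#include <bitset>
#include <cassert>
#include <cstdint>

// Parity (XOR-reduction) of the set bits of an 8-bit value.
static uint8_t parity8(uint8_t V) { return std::bitset<8>(V).count() & 1; }

// One GF(2) dot product: one output bit of the affine transform, ignoring b.
static uint8_t dotGF2(uint8_t Row, uint8_t X) { return parity8(Row & X); }

// Exact shadow of the AND stage, following the rule quoted above.
static uint8_t andShadow(uint8_t Row, uint8_t RowS, uint8_t X, uint8_t XS) {
  return (RowS & XS) | (Row & XS) | (RowS & X);
}

int main() {
  uint8_t Row = 0b10110011, RowS = 0;      // fully initialized matrix row
  uint8_t X = 0b11110000, XS = 0b00000011; // x has two poisoned bits
  // Exact rule: the dot product is poisoned iff any AND term is poisoned.
  bool Exact = andShadow(Row, RowS, X, XS) != 0;
  // Approximation above: run the same dot product over the shadow inputs.
  bool Approx = dotGF2(RowS, X) | dotGF2(Row, XS) | dotGF2(RowS, XS);
  // Both poisoned bits land under set row bits, the parity of an even number
  // of poisoned terms is 0, and the approximation misses the poison.
  assert(Exact && !Approx);
  return 0;
}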
// Handle Arm NEON vector load intrinsics (vld*).
//
// The WithLane instructions (ld[234]lane) are similar to:
@@ -5604,6 +5677,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
}
+ // AVX Galois Field New Instructions
+ case Intrinsic::x86_vgf2p8affineqb_128:
+ case Intrinsic::x86_vgf2p8affineqb_256:
+ case Intrinsic::x86_vgf2p8affineqb_512:
+ handleAVXGF2P8Affine(I);
+ break;
+
case Intrinsic::fshl:
case Intrinsic::fshr:
handleFunnelShift(I);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 0f63ed0..9b87180 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1360,13 +1360,10 @@ struct DSEState {
/// indicating whether \p I is a free-like call.
std::optional<std::pair<MemoryLocation, bool>>
getLocForTerminator(Instruction *I) const {
- uint64_t Len;
- Value *Ptr;
- if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
- m_Value(Ptr))))
- return {std::make_pair(MemoryLocation(Ptr, Len), false)};
-
if (auto *CB = dyn_cast<CallBase>(I)) {
+ if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
+ return {
+ std::make_pair(MemoryLocation::getForArgument(CB, 1, &TLI), false)};
if (Value *FreedOp = getFreedOperand(CB, &TLI))
return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
}
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index e706a6f..deff79b 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -237,8 +237,7 @@ class InductiveRangeCheckElimination {
DominatorTree &DT;
LoopInfo &LI;
- using GetBFIFunc =
- std::optional<llvm::function_ref<llvm::BlockFrequencyInfo &()>>;
+ using GetBFIFunc = llvm::function_ref<llvm::BlockFrequencyInfo &()>;
GetBFIFunc GetBFI;
// Returns the estimated number of iterations based on block frequency info if
@@ -249,7 +248,7 @@ class InductiveRangeCheckElimination {
public:
InductiveRangeCheckElimination(ScalarEvolution &SE,
BranchProbabilityInfo *BPI, DominatorTree &DT,
- LoopInfo &LI, GetBFIFunc GetBFI = std::nullopt)
+ LoopInfo &LI, GetBFIFunc GetBFI = nullptr)
: SE(SE), BPI(BPI), DT(DT), LI(LI), GetBFI(GetBFI) {}
bool run(Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop);
@@ -959,7 +958,7 @@ PreservedAnalyses IRCEPass::run(Function &F, FunctionAnalysisManager &AM) {
std::optional<uint64_t>
InductiveRangeCheckElimination::estimatedTripCount(const Loop &L) {
if (GetBFI) {
- BlockFrequencyInfo &BFI = (*GetBFI)();
+ BlockFrequencyInfo &BFI = GetBFI();
uint64_t hFreq = BFI.getBlockFreq(L.getHeader()).getFrequency();
uint64_t phFreq = BFI.getBlockFreq(L.getLoopPreheader()).getFrequency();
if (phFreq == 0 || hFreq == 0)
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 6a3f656..1a52af1 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -651,7 +651,7 @@ class NewGVN {
BitVector TouchedInstructions;
DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
- mutable DenseMap<const IntrinsicInst *, const Value *> IntrinsicInstPred;
+ mutable DenseMap<const IntrinsicInst *, const Value *> PredicateSwapChoice;
#ifndef NDEBUG
// Debugging for how many times each block and instruction got processed.
@@ -840,7 +840,7 @@ private:
// Ranking
unsigned int getRank(const Value *) const;
bool shouldSwapOperands(const Value *, const Value *) const;
- bool shouldSwapOperandsForIntrinsic(const Value *, const Value *,
+ bool shouldSwapOperandsForPredicate(const Value *, const Value *,
const IntrinsicInst *I) const;
// Reachability handling.
@@ -1624,7 +1624,7 @@ NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst *I) const {
Value *AdditionallyUsedValue = CmpOp0;
// Sort the ops.
- if (shouldSwapOperandsForIntrinsic(FirstOp, SecondOp, I)) {
+ if (shouldSwapOperandsForPredicate(FirstOp, SecondOp, I)) {
std::swap(FirstOp, SecondOp);
Predicate = CmpInst::getSwappedPredicate(Predicate);
AdditionallyUsedValue = CmpOp1;
@@ -3024,7 +3024,7 @@ void NewGVN::cleanupTables() {
PredicateToUsers.clear();
MemoryToUsers.clear();
RevisitOnReachabilityChange.clear();
- IntrinsicInstPred.clear();
+ PredicateSwapChoice.clear();
}
// Assign local DFS number mapping to instructions, and leave space for Value
@@ -4250,20 +4250,18 @@ bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const {
return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
}
-bool NewGVN::shouldSwapOperandsForIntrinsic(const Value *A, const Value *B,
+bool NewGVN::shouldSwapOperandsForPredicate(const Value *A, const Value *B,
const IntrinsicInst *I) const {
- auto LookupResult = IntrinsicInstPred.find(I);
if (shouldSwapOperands(A, B)) {
- if (LookupResult == IntrinsicInstPred.end())
- IntrinsicInstPred.insert({I, B});
- else
- LookupResult->second = B;
+ PredicateSwapChoice[I] = B;
return true;
}
- if (LookupResult != IntrinsicInstPred.end()) {
+ auto LookupResult = PredicateSwapChoice.find(I);
+ if (LookupResult != PredicateSwapChoice.end()) {
auto *SeenPredicate = LookupResult->second;
if (SeenPredicate) {
+ // We previously decided to swap B to the left. Keep that choice.
if (SeenPredicate == B)
return true;
else
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index b9292af..b78c702 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -703,6 +703,7 @@ private:
// Add U as additional user of V.
void addAdditionalUser(Value *V, User *U) { AdditionalUsers[V].insert(U); }
+ void handlePredicate(Instruction *I, Value *CopyOf, const PredicateBase *PI);
void handleCallOverdefined(CallBase &CB);
void handleCallResult(CallBase &CB);
void handleCallArguments(CallBase &CB);
@@ -1927,6 +1928,75 @@ void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
}
}
+void SCCPInstVisitor::handlePredicate(Instruction *I, Value *CopyOf,
+ const PredicateBase *PI) {
+ ValueLatticeElement CopyOfVal = getValueState(CopyOf);
+ const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
+ if (!Constraint) {
+ mergeInValue(ValueState[I], I, CopyOfVal);
+ return;
+ }
+
+ CmpInst::Predicate Pred = Constraint->Predicate;
+ Value *OtherOp = Constraint->OtherOp;
+
+ // Wait until OtherOp is resolved.
+ if (getValueState(OtherOp).isUnknown()) {
+ addAdditionalUser(OtherOp, I);
+ return;
+ }
+
+ ValueLatticeElement CondVal = getValueState(OtherOp);
+ ValueLatticeElement &IV = ValueState[I];
+ if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
+ auto ImposedCR =
+ ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
+
+ // Get the range imposed by the condition.
+ if (CondVal.isConstantRange())
+ ImposedCR = ConstantRange::makeAllowedICmpRegion(
+ Pred, CondVal.getConstantRange());
+
+ // Combine range info for the original value with the new range from the
+ // condition.
+ auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
+ /*UndefAllowed=*/true);
+ // Treat an unresolved input like a full range.
+ if (CopyOfCR.isEmptySet())
+ CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
+ auto NewCR = ImposedCR.intersectWith(CopyOfCR);
+ // If the existing information is != x, do not use the information from
+ // a chained predicate, as the != x information is more likely to be
+ // helpful in practice.
+ if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
+ NewCR = CopyOfCR;
+
+ // The new range is based on a branch condition. That guarantees that
+ // neither of the compare operands can be undef in the branch targets,
+ // unless we have conditions that are always true/false (e.g. icmp ule
+ // i32, %a, i32_max). For the latter overdefined/empty range will be
+ // inferred, but the branch will get folded accordingly anyways.
+ addAdditionalUser(OtherOp, I);
+ mergeInValue(
+ IV, I, ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
+ return;
+ } else if (Pred == CmpInst::ICMP_EQ &&
+ (CondVal.isConstant() || CondVal.isNotConstant())) {
+ // For non-integer values or integer constant expressions, only
+ // propagate equal constants or not-constants.
+ addAdditionalUser(OtherOp, I);
+ mergeInValue(IV, I, CondVal);
+ return;
+ } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
+ // Propagate inequalities.
+ addAdditionalUser(OtherOp, I);
+ mergeInValue(IV, I, ValueLatticeElement::getNot(CondVal.getConstant()));
+ return;
+ }
+
+ return (void)mergeInValue(IV, I, CopyOfVal);
+}
+
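(Aside: a toy C++ model of the range refinement handlePredicate performs, using a plain half-open interval instead of llvm::ConstantRange; the numbers are illustrative.)

#include <algorithm>
#include <cassert>
#include <cstdint>

// Toy half-open range [Lo, Hi) standing in for llvm::ConstantRange.
struct Range {
  uint64_t Lo, Hi;
  Range intersect(Range O) const {
    return {std::max(Lo, O.Lo), std::min(Hi, O.Hi)};
  }
};

// Range implied on the left-hand operand by "V ult C", the analogue of
// ConstantRange::makeAllowedICmpRegion for ICMP_ULT.
static Range allowedULT(uint64_t C) { return {0, C}; }

int main() {
  // %v is known to be in [0, 256); a branch guarded by (%v ult 10) was taken,
  // so the ssa.copy of %v inside that branch is refined to [0, 10).
  Range CopyOf = {0, 256};
  Range Refined = CopyOf.intersect(allowedULT(10));
  assert(Refined.Lo == 0 && Refined.Hi == 10);
  return 0;
}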
void SCCPInstVisitor::handleCallResult(CallBase &CB) {
Function *F = CB.getCalledFunction();
@@ -1936,77 +2006,10 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
return;
Value *CopyOf = CB.getOperand(0);
- ValueLatticeElement CopyOfVal = getValueState(CopyOf);
- const auto *PI = getPredicateInfoFor(&CB);
+ const PredicateBase *PI = getPredicateInfoFor(&CB);
assert(PI && "Missing predicate info for ssa.copy");
-
- const std::optional<PredicateConstraint> &Constraint =
- PI->getConstraint();
- if (!Constraint) {
- mergeInValue(ValueState[&CB], &CB, CopyOfVal);
- return;
- }
-
- CmpInst::Predicate Pred = Constraint->Predicate;
- Value *OtherOp = Constraint->OtherOp;
-
- // Wait until OtherOp is resolved.
- if (getValueState(OtherOp).isUnknown()) {
- addAdditionalUser(OtherOp, &CB);
- return;
- }
-
- ValueLatticeElement CondVal = getValueState(OtherOp);
- ValueLatticeElement &IV = ValueState[&CB];
- if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
- auto ImposedCR =
- ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
-
- // Get the range imposed by the condition.
- if (CondVal.isConstantRange())
- ImposedCR = ConstantRange::makeAllowedICmpRegion(
- Pred, CondVal.getConstantRange());
-
- // Combine range info for the original value with the new range from the
- // condition.
- auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
- /*UndefAllowed=*/true);
- // Treat an unresolved input like a full range.
- if (CopyOfCR.isEmptySet())
- CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
- auto NewCR = ImposedCR.intersectWith(CopyOfCR);
- // If the existing information is != x, do not use the information from
- // a chained predicate, as the != x information is more likely to be
- // helpful in practice.
- if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
- NewCR = CopyOfCR;
-
- // The new range is based on a branch condition. That guarantees that
- // neither of the compare operands can be undef in the branch targets,
- // unless we have conditions that are always true/false (e.g. icmp ule
- // i32, %a, i32_max). For the latter overdefined/empty range will be
- // inferred, but the branch will get folded accordingly anyways.
- addAdditionalUser(OtherOp, &CB);
- mergeInValue(
- IV, &CB,
- ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
- return;
- } else if (Pred == CmpInst::ICMP_EQ &&
- (CondVal.isConstant() || CondVal.isNotConstant())) {
- // For non-integer values or integer constant expressions, only
- // propagate equal constants or not-constants.
- addAdditionalUser(OtherOp, &CB);
- mergeInValue(IV, &CB, CondVal);
- return;
- } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
- // Propagate inequalities.
- addAdditionalUser(OtherOp, &CB);
- mergeInValue(IV, &CB,
- ValueLatticeElement::getNot(CondVal.getConstant()));
- return;
- }
-
- return (void)mergeInValue(IV, &CB, CopyOfVal);
+ handlePredicate(&CB, CopyOf, PI);
+ return;
}
if (II->getIntrinsicID() == Intrinsic::vscale) {
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 571fa11..1eb8996 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1249,7 +1249,8 @@ Value *SCEVExpander::tryToReuseLCSSAPhi(const SCEVAddRecExpr *S) {
// offset, if the offset is simpler.
const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
const SCEV *Op = Diff;
- match(Diff, m_scev_Mul(m_scev_AllOnes(), m_SCEV(Op)));
+ match(Op, m_scev_Add(m_SCEVConstant(), m_SCEV(Op)));
+ match(Op, m_scev_Mul(m_scev_AllOnes(), m_SCEV(Op)));
match(Op, m_scev_PtrToInt(m_SCEV(Op)));
if (!isa<SCEVConstant, SCEVUnknown>(Op))
continue;
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 94b0ab8..674de57 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -198,6 +198,11 @@ static cl::opt<unsigned> MaxSwitchCasesPerResult(
"max-switch-cases-per-result", cl::Hidden, cl::init(16),
cl::desc("Limit cases to analyze when converting a switch to select"));
+static cl::opt<unsigned> MaxJumpThreadingLiveBlocks(
+ "max-jump-threading-live-blocks", cl::Hidden, cl::init(24),
+    cl::desc("Limit the number of blocks in which a value defined in a "
+             "threaded block is allowed to be live"));
+
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLinearMaps,
"Number of switch instructions turned into linear mapping");
@@ -3390,8 +3395,27 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI,
return true;
}
+using BlocksSet = SmallPtrSet<BasicBlock *, 8>;
+
+// Return false if the number of blocks searched exceeds the limit.
+static bool findReaching(BasicBlock *BB, BasicBlock *DefBB,
+ BlocksSet &ReachesNonLocalUses) {
+ if (BB == DefBB)
+ return true;
+ if (!ReachesNonLocalUses.insert(BB).second)
+ return true;
+
+ if (ReachesNonLocalUses.size() > MaxJumpThreadingLiveBlocks)
+ return false;
+ for (BasicBlock *Pred : predecessors(BB))
+ if (!findReaching(Pred, DefBB, ReachesNonLocalUses))
+ return false;
+ return true;
+}
+
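(Aside: a standalone C++ sketch of the backward search findReaching performs, on a toy adjacency-list CFG with an explicit budget standing in for -max-jump-threading-live-blocks.)

#include <cassert>
#include <set>
#include <vector>

// Toy CFG: Preds[B] lists the predecessors of block B.
using CFG = std::vector<std::vector<int>>;

// Walk predecessors from UseBB back toward DefBB, recording every visited
// block; a threaded destination is rejected if it shows up in this set.
// Returns false once the visited set exceeds the budget.
static bool findReaching(const CFG &Preds, int BB, int DefBB,
                         std::set<int> &Reaches, unsigned Budget) {
  if (BB == DefBB || !Reaches.insert(BB).second)
    return true;
  if (Reaches.size() > Budget)
    return false;
  for (int P : Preds[BB])
    if (!findReaching(Preds, P, DefBB, Reaches, Budget))
      return false;
  return true;
}

int main() {
  // Edges: 0 -> 1, 1 -> 2, 2 -> 3, 1 -> 3; a value defined in 1 is used in 3.
  CFG Preds = {{}, {0}, {1}, {2, 1}};
  std::set<int> Reaches;
  bool OK = findReaching(Preds, /*UseBB=*/3, /*DefBB=*/1, Reaches, /*Budget=*/8);
  assert(OK);
  // Blocks 2 and 3 can carry the value live, so threading into them is unsafe.
  assert(Reaches.count(2) && Reaches.count(3) && !Reaches.count(0));
  return 0;
}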
/// Return true if we can thread a branch across this block.
-static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
+static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB,
+ BlocksSet &NonLocalUseBlocks) {
int Size = 0;
EphemeralValueTracker EphTracker;
@@ -3411,12 +3435,16 @@ static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
return false; // Don't clone large BB's.
}
- // We can only support instructions that do not define values that are
- // live outside of the current basic block.
+ // Record blocks with non-local uses of values defined in the current basic
+ // block.
for (User *U : I.users()) {
Instruction *UI = cast<Instruction>(U);
- if (UI->getParent() != BB || isa<PHINode>(UI))
- return false;
+ BasicBlock *UsedInBB = UI->getParent();
+ if (UsedInBB == BB) {
+ if (isa<PHINode>(UI))
+ return false;
+ } else
+ NonLocalUseBlocks.insert(UsedInBB);
}
// Looks ok, continue checking.
@@ -3475,18 +3503,37 @@ foldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
return false;
// Now we know that this block has multiple preds and two succs.
- // Check that the block is small enough and values defined in the block are
- // not used outside of it.
- if (!blockIsSimpleEnoughToThreadThrough(BB))
+ // Check that the block is small enough and record which non-local blocks use
+ // values defined in the block.
+
+ BlocksSet NonLocalUseBlocks;
+ BlocksSet ReachesNonLocalUseBlocks;
+ if (!blockIsSimpleEnoughToThreadThrough(BB, NonLocalUseBlocks))
return false;
+ // Jump-threading can only be done to destinations where no values defined
+ // in BB are live.
+
+ // Quickly check if both destinations have uses. If so, jump-threading cannot
+ // be done.
+ if (NonLocalUseBlocks.contains(BI->getSuccessor(0)) &&
+ NonLocalUseBlocks.contains(BI->getSuccessor(1)))
+ return false;
+
+ // Search backward from NonLocalUseBlocks to find which blocks
+ // reach non-local uses.
+ for (BasicBlock *UseBB : NonLocalUseBlocks)
+ // Give up if too many blocks are searched.
+ if (!findReaching(UseBB, BB, ReachesNonLocalUseBlocks))
+ return false;
+
for (const auto &Pair : KnownValues) {
- // Okay, we now know that all edges from PredBB should be revectored to
- // branch to RealDest.
ConstantInt *CB = Pair.first;
ArrayRef<BasicBlock *> PredBBs = Pair.second.getArrayRef();
BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
+ // Okay, we now know that all edges from PredBB should be revectored to
+ // branch to RealDest.
if (RealDest == BB)
continue; // Skip self loops.
@@ -3496,6 +3543,10 @@ foldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
}))
continue;
+ // Only revector to RealDest if no values defined in BB are live.
+ if (ReachesNonLocalUseBlocks.contains(RealDest))
+ continue;
+
LLVM_DEBUG({
dbgs() << "Condition " << *Cond << " in " << BB->getName()
<< " has value " << *Pair.first << " in predecessors:\n";
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index f57ce0c..ea0fa06 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -170,8 +170,7 @@ public:
new VPInstruction(Opcode, Operands, Flags, DL, Name));
}
- VPInstruction *createNaryOp(unsigned Opcode,
- std::initializer_list<VPValue *> Operands,
+ VPInstruction *createNaryOp(unsigned Opcode, ArrayRef<VPValue *> Operands,
Type *ResultTy, const VPIRFlags &Flags = {},
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
@@ -180,7 +179,7 @@ public:
}
VPInstruction *createOverflowingOp(unsigned Opcode,
- std::initializer_list<VPValue *> Operands,
+ ArrayRef<VPValue *> Operands,
VPRecipeWithIRFlags::WrapFlagsTy WrapFlags,
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 7b7efb8..850c4a1 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -93,6 +93,7 @@
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -155,6 +156,7 @@
#include <utility>
using namespace llvm;
+using namespace SCEVPatternMatch;
#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME
@@ -418,7 +420,24 @@ static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
/// ElementCount to include loops whose trip count is a function of vscale.
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
const Loop *L) {
- return ElementCount::getFixed(SE->getSmallConstantTripCount(L));
+ if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
+ return ElementCount::getFixed(ExpectedTC);
+
+ const SCEV *BTC = SE->getBackedgeTakenCount(L);
+ if (isa<SCEVCouldNotCompute>(BTC))
+ return ElementCount::getFixed(0);
+
+ const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
+ if (isa<SCEVVScale>(ExitCount))
+ return ElementCount::getScalable(1);
+
+ const APInt *Scale;
+ if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
+ if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
+ if (Scale->getActiveBits() <= 32)
+ return ElementCount::getScalable(Scale->getZExtValue());
+
+ return ElementCount::getFixed(0);
}
/// Returns "best known" trip count, which is either a valid positive trip count
@@ -2593,12 +2612,12 @@ static void cse(BasicBlock *BB) {
}
}
-/// This function attempts to return a value that represents the vectorization
-/// factor at runtime. For fixed-width VFs we know this precisely at compile
+/// This function attempts to return a value that represents the ElementCount
+/// at runtime. For fixed-width VFs we know this precisely at compile
/// time, but for scalable VFs we calculate it based on an estimate of the
/// vscale value.
-static unsigned getEstimatedRuntimeVF(ElementCount VF,
- std::optional<unsigned> VScale) {
+static unsigned estimateElementCount(ElementCount VF,
+ std::optional<unsigned> VScale) {
unsigned EstimatedVF = VF.getKnownMinValue();
if (VF.isScalable())
if (VScale)
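(Aside: the renamed helper's estimate in a self-contained form, with plain parameters standing in for ElementCount; for a scalable VF the estimate is the known minimum multiplied by the vscale value used for tuning, when one is available.)

#include <cassert>
#include <optional>

// Plain stand-in for the renamed helper above.
static unsigned estimateElementCount(unsigned KnownMin, bool Scalable,
                                     std::optional<unsigned> VScale) {
  unsigned Estimated = KnownMin;
  if (Scalable && VScale)
    Estimated *= *VScale;
  return Estimated;
}

int main() {
  assert(estimateElementCount(8, /*Scalable=*/false, std::nullopt) == 8);
  assert(estimateElementCount(4, /*Scalable=*/true, 2) == 8); // e.g. 256-bit SVE
  assert(estimateElementCount(4, /*Scalable=*/true, std::nullopt) == 4);
  return 0;
}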
@@ -2708,7 +2727,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
// use the value of vscale used for tuning.
Loop *VectorLoop = LI->getLoopFor(HeaderBB);
unsigned EstimatedVFxUF =
- getEstimatedRuntimeVF(VF * UF, Cost->getVScaleForTuning());
+ estimateElementCount(VF * UF, Cost->getVScaleForTuning());
setProfileInfoAfterUnrolling(OrigLoop, VectorLoop, OrigLoop, EstimatedVFxUF);
}
@@ -3003,7 +3022,7 @@ bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
// is correct. The easiest form of the later is to require that all values
// stored are the same.
return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
- Legal->isInvariant(cast<StoreInst>(I)->getValueOperand()));
+ TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
}
case Instruction::UDiv:
case Instruction::SDiv:
@@ -3131,7 +3150,7 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
!isScalarEpilogueAllowed();
bool StoreAccessWithGapsRequiresMasking =
- isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
+ isa<StoreInst>(I) && !Group->isFull();
if (!PredicatedAccessRequiresMasking &&
!LoadAccessWithGapsRequiresEpilogMasking &&
!StoreAccessWithGapsRequiresMasking)
@@ -4337,7 +4356,7 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
unsigned Width =
- getEstimatedRuntimeVF(Candidate.Width, CM.getVScaleForTuning());
+ estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
<< " costs: " << (Candidate.Cost / Width));
if (VF.isScalable())
@@ -4445,7 +4464,7 @@ bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
? EpilogueVectorizationMinVF
: TTI.getEpilogueVectorizationMinVF();
- return getEstimatedRuntimeVF(VF * Multiplier, VScaleForTuning) >=
+ return estimateElementCount(VF * Multiplier, VScaleForTuning) >=
MinVFThreshold;
}
@@ -4498,7 +4517,7 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
// the main loop handles 8 lanes per iteration. We could still benefit from
// vectorizing the epilogue loop with VF=4.
ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
- getEstimatedRuntimeVF(MainLoopVF, CM.getVScaleForTuning()));
+ estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
ScalarEvolution &SE = *PSE.getSE();
Type *TCType = Legal->getWidestInductionType();
@@ -4745,16 +4764,20 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
}
- unsigned EstimatedVF = getEstimatedRuntimeVF(VF, VScaleForTuning);
-
// Try to get the exact trip count, or an estimate based on profiling data or
// ConstantMax from PSE, failing that.
- if (auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop)) {
+ auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop);
+
+ // For fixed length VFs treat a scalable trip count as unknown.
+ if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
+ // Re-evaluate trip counts and VFs to be in the same numerical space.
+ unsigned AvailableTC = estimateElementCount(*BestKnownTC, VScaleForTuning);
+ unsigned EstimatedVF = estimateElementCount(VF, VScaleForTuning);
+
// At least one iteration must be scalar when this constraint holds. So the
// maximum available iterations for interleaving is one less.
- unsigned AvailableTC = requiresScalarEpilogue(VF.isVector())
- ? BestKnownTC->getFixedValue() - 1
- : BestKnownTC->getFixedValue();
+ if (requiresScalarEpilogue(VF.isVector()))
+ --AvailableTC;
unsigned InterleaveCountLB = bit_floor(std::max(
1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
@@ -5349,7 +5372,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
// Calculate the cost of the whole interleaved group.
bool UseMaskForGaps =
(Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
- (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
+ (isa<StoreInst>(I) && !Group->isFull());
InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
@@ -6925,7 +6948,7 @@ InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
// Now compute and add the VPlan-based cost.
Cost += Plan.cost(VF, CostCtx);
#ifndef NDEBUG
- unsigned EstimatedWidth = getEstimatedRuntimeVF(VF, CM.getVScaleForTuning());
+ unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
<< " (Estimated cost per lane: ");
if (Cost.isValid()) {
@@ -7292,6 +7315,8 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
// Regions are dissolved after optimizing for VF and UF, which completely
// removes unneeded loop regions first.
VPlanTransforms::dissolveLoopRegions(BestVPlan);
+ // Canonicalize EVL loops after regions are dissolved.
+ VPlanTransforms::canonicalizeEVLLoops(BestVPlan);
// Perform the actual loop transformation.
VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
OrigLoop->getParentLoop(),
@@ -9611,7 +9636,7 @@ static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
// For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
// the computations are performed on doubles, not integers and the result
// is rounded up, hence we get an upper estimate of the TC.
- unsigned IntVF = getEstimatedRuntimeVF(VF.Width, VScale);
+ unsigned IntVF = estimateElementCount(VF.Width, VScale);
uint64_t RtC = TotalCost.getValue();
uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 68e7c20..d249a34 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2530,8 +2530,8 @@ void VPReductionRecipe::execute(VPTransformState &State) {
NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
else
NextInChain = State.Builder.CreateBinOp(
- (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), NewRed,
- PrevInChain);
+ (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
+ PrevInChain, NewRed);
}
State.set(this, NextInChain, /*IsScalar*/ true);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 8de05c1..18d331b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -32,11 +32,11 @@
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
-#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/TypeSize.h"
using namespace llvm;
+using namespace VPlanPatternMatch;
bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
VPlanPtr &Plan,
@@ -528,13 +528,11 @@ static void removeRedundantCanonicalIVs(VPlan &Plan) {
/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
- using namespace llvm::PatternMatch;
// Do remove conditional assume instructions as their conditions may be
// flattened.
auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
- bool IsConditionalAssume =
- RepR && RepR->isPredicated() &&
- match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
+ bool IsConditionalAssume = RepR && RepR->isPredicated() &&
+ match(RepR, m_Intrinsic<Intrinsic::assume>());
if (IsConditionalAssume)
return true;
@@ -625,7 +623,6 @@ static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
/// original IV's users. This is an optional optimization to reduce the needs of
/// vector extracts.
static void legalizeAndOptimizeInductions(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
bool HasOnlyVectorVFs = !Plan.hasScalarVFOnly();
VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
@@ -727,7 +724,6 @@ static VPWidenInductionRecipe *getOptimizableIVOf(VPValue *VPV) {
return nullptr;
auto IsWideIVInc = [&]() {
- using namespace VPlanPatternMatch;
auto &ID = WideIV->getInductionDescriptor();
// Check if VPV increments the induction by the induction step.
@@ -771,8 +767,6 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
VPTypeAnalysis &TypeInfo,
VPBlockBase *PredVPBB,
VPValue *Op) {
- using namespace VPlanPatternMatch;
-
VPValue *Incoming, *Mask;
if (!match(Op, m_VPInstruction<VPInstruction::ExtractLane>(
m_VPInstruction<VPInstruction::FirstActiveLane>(
@@ -827,8 +821,6 @@ static VPValue *
optimizeLatchExitInductionUser(VPlan &Plan, VPTypeAnalysis &TypeInfo,
VPBlockBase *PredVPBB, VPValue *Op,
DenseMap<VPValue *, VPValue *> &EndValues) {
- using namespace VPlanPatternMatch;
-
VPValue *Incoming;
if (!match(Op, m_VPInstruction<VPInstruction::ExtractLastElement>(
m_VPValue(Incoming))))
@@ -986,7 +978,6 @@ static Value *tryToFoldLiveIns(const VPRecipeBase &R, unsigned Opcode,
/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
- using namespace llvm::VPlanPatternMatch;
VPlan *Plan = R.getParent()->getPlan();
auto *Def = dyn_cast<VPSingleDefRecipe>(&R);
@@ -1269,7 +1260,6 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
/// Normalize and simplify VPBlendRecipes. Should be run after simplifyRecipes
/// to make sure the masks are simplified.
static void simplifyBlends(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
@@ -1393,7 +1383,6 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
// Currently only handle cases where the single user is a header-mask
// comparison with the backedge-taken-count.
- using namespace VPlanPatternMatch;
if (!match(
*WideIV->user_begin(),
m_Binary<Instruction::ICmp>(
@@ -1424,7 +1413,6 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
static bool isConditionTrueViaVFAndUF(VPValue *Cond, VPlan &Plan,
ElementCount BestVF, unsigned BestUF,
ScalarEvolution &SE) {
- using namespace llvm::VPlanPatternMatch;
if (match(Cond, m_Binary<Instruction::Or>(m_VPValue(), m_VPValue())))
return any_of(Cond->getDefiningRecipe()->operands(), [&Plan, BestVF, BestUF,
&SE](VPValue *C) {
@@ -1464,7 +1452,6 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
auto *Term = &ExitingVPBB->back();
VPValue *Cond;
ScalarEvolution &SE = *PSE.getSE();
- using namespace llvm::VPlanPatternMatch;
if (match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) ||
match(Term, m_BranchOnCond(
m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue()))))) {
@@ -1496,11 +1483,11 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
auto *CanIVTy = Plan.getCanonicalIV()->getScalarType();
if (all_of(Header->phis(),
IsaPred<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe,
- VPFirstOrderRecurrencePHIRecipe>)) {
+ VPFirstOrderRecurrencePHIRecipe, VPPhi>)) {
for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) {
- auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR);
- HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue());
- HeaderPhiR->eraseFromParent();
+ auto *Phi = cast<VPPhiAccessors>(&HeaderR);
+ HeaderR.getVPSingleValue()->replaceAllUsesWith(Phi->getIncomingValue(0));
+ HeaderR.eraseFromParent();
}
VPBlockBase *Preheader = VectorRegion->getSinglePredecessor();
@@ -1847,7 +1834,6 @@ void VPlanTransforms::truncateToMinimalBitwidths(
if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
VPW->dropPoisonGeneratingFlags();
- using namespace llvm::VPlanPatternMatch;
if (OldResSizeInBits != NewResSizeInBits &&
!match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
// Extend result to original width.
@@ -1897,7 +1883,6 @@ void VPlanTransforms::truncateToMinimalBitwidths(
}
void VPlanTransforms::removeBranchOnConst(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getEntry()))) {
VPValue *Cond;
@@ -2143,7 +2128,6 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
VPRecipeBase &CurRecipe,
VPTypeAnalysis &TypeInfo,
VPValue &AllOneMask, VPValue &EVL) {
- using namespace llvm::VPlanPatternMatch;
auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
assert(OrigMask && "Unmasked recipe when folding tail");
// HeaderMask will be handled using EVL.
@@ -2223,7 +2207,6 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : *VPBB) {
- using namespace VPlanPatternMatch;
VPValue *V1, *V2;
if (!match(&R,
m_VPInstruction<VPInstruction::FirstOrderRecurrenceSplice>(
@@ -2244,6 +2227,9 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
// Try to optimize header mask recipes away to their EVL variants.
for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
+ // TODO: Split optimizeMaskToEVL out and move into
+ // VPlanTransforms::optimize. transformRecipestoEVLRecipes should be run in
+ // tryToBuildVPlanWithVPRecipes beforehand.
for (VPUser *U : collectUsersRecursively(HeaderMask)) {
auto *CurRecipe = cast<VPRecipeBase>(U);
VPRecipeBase *EVLRecipe =
@@ -2265,6 +2251,20 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
}
ToErase.push_back(CurRecipe);
}
+
+ // Replace header masks with a mask equivalent to predicating by EVL:
+ //
+ // icmp ule widen-canonical-iv backedge-taken-count
+ // ->
+ // icmp ult step-vector, EVL
+ VPRecipeBase *EVLR = EVL.getDefiningRecipe();
+ VPBuilder Builder(EVLR->getParent(), std::next(EVLR->getIterator()));
+ Type *EVLType = TypeInfo.inferScalarType(&EVL);
+ VPValue *EVLMask = Builder.createICmp(
+ CmpInst::ICMP_ULT,
+ Builder.createNaryOp(VPInstruction::StepVector, {}, EVLType), &EVL);
+ HeaderMask->replaceAllUsesWith(EVLMask);
+ ToErase.push_back(HeaderMask->getDefiningRecipe());
}
for (VPRecipeBase *R : reverse(ToErase)) {
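(Aside: a scalar C++ check of the mask equivalence used above. As long as EVL is the number of remaining iterations clamped to VF, the lane-wise "icmp ule widen-canonical-iv, backedge-taken-count" mask and the "icmp ult step-vector, EVL" mask agree.)

#include <algorithm>
#include <cassert>

int main() {
  const unsigned VF = 8, TC = 21; // trip count; backedge-taken count is TC - 1
  for (unsigned IV = 0; IV < TC; IV += VF) {
    unsigned EVL = std::min(VF, TC - IV); // lanes processed this iteration
    for (unsigned Lane = 0; Lane < VF; ++Lane) {
      bool OldMask = (IV + Lane) <= (TC - 1); // icmp ule widen-canonical-iv, BTC
      bool NewMask = Lane < EVL;              // icmp ult step-vector, EVL
      assert(OldMask == NewMask);
    }
  }
  return 0;
}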
@@ -2292,10 +2292,12 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
/// [ %NextEVLIV, %vector.body ]
-/// %AVL = sub original TC, %EVLPhi
+/// %AVL = phi [ trip-count, %vector.ph ], [ %NextAVL, %vector.body ]
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
/// ...
-/// %NextEVLIV = add IVSize (cast i32 %VPEVVL to IVSize), %EVLPhi
+/// %OpEVL = cast i32 %VPEVL to IVSize
+/// %NextEVLIV = add IVSize %OpEVL, %EVLPhi
+/// %NextAVL = sub IVSize nuw %AVL, %OpEVL
/// ...
///
/// If MaxSafeElements is provided, the function adds the following recipes:
@@ -2306,12 +2308,14 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
/// [ %NextEVLIV, %vector.body ]
-/// %AVL = sub original TC, %EVLPhi
+/// %AVL = phi [ trip-count, %vector.ph ], [ %NextAVL, %vector.body ]
/// %cmp = cmp ult %AVL, MaxSafeElements
/// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
/// ...
-/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
+/// %OpEVL = cast i32 %VPEVL to IVSize
+/// %NextEVLIV = add IVSize %OpEVL, %EVLPhi
+/// %NextAVL = sub IVSize nuw %AVL, %OpEVL
/// ...
///
bool VPlanTransforms::tryAddExplicitVectorLength(
@@ -2333,9 +2337,12 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
EVLPhi->insertAfter(CanonicalIVPHI);
VPBuilder Builder(Header, Header->getFirstNonPhi());
- // Compute original TC - IV as the AVL (application vector length).
- VPValue *AVL = Builder.createNaryOp(
- Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
+ // Create the AVL (application vector length), starting from TC -> 0 in steps
+ // of EVL.
+ VPPhi *AVLPhi = Builder.createScalarPhi(
+ {Plan.getTripCount()}, DebugLoc::getCompilerGenerated(), "avl");
+ VPValue *AVL = AVLPhi;
+
if (MaxSafeElements) {
// Support for MaxSafeDist for correct loop emission.
VPValue *AVLSafe =
@@ -2362,6 +2369,11 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
EVLPhi->addOperand(NextEVLIV);
+ VPValue *NextAVL = Builder.createOverflowingOp(
+ Instruction::Sub, {AVLPhi, OpVPEVL}, {/*hasNUW=*/true, /*hasNSW=*/false},
+ DebugLoc::getCompilerGenerated(), "avl.next");
+ AVLPhi->addOperand(NextAVL);
+
transformRecipestoEVLRecipes(Plan, *VPEVL);
// Replace all uses of VPCanonicalIVPHIRecipe by
@@ -2373,6 +2385,65 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
return true;
}
+void VPlanTransforms::canonicalizeEVLLoops(VPlan &Plan) {
+ // Find EVL loop entries by locating VPEVLBasedIVPHIRecipe.
+ // There should be only one EVL PHI in the entire plan.
+ VPEVLBasedIVPHIRecipe *EVLPhi = nullptr;
+
+ for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
+ vp_depth_first_shallow(Plan.getEntry())))
+ for (VPRecipeBase &R : VPBB->phis())
+ if (auto *PhiR = dyn_cast<VPEVLBasedIVPHIRecipe>(&R)) {
+ assert(!EVLPhi && "Found multiple EVL PHIs. Only one expected");
+ EVLPhi = PhiR;
+ }
+
+ // Early return if no EVL PHI is found.
+ if (!EVLPhi)
+ return;
+
+ VPBasicBlock *HeaderVPBB = EVLPhi->getParent();
+ VPValue *EVLIncrement = EVLPhi->getBackedgeValue();
+
+ // Convert EVLPhi to concrete recipe.
+ auto *ScalarR =
+ VPBuilder(EVLPhi).createScalarPhi({EVLPhi->getStartValue(), EVLIncrement},
+ EVLPhi->getDebugLoc(), "evl.based.iv");
+ EVLPhi->replaceAllUsesWith(ScalarR);
+ EVLPhi->eraseFromParent();
+
+ // Replace CanonicalIVInc with EVL-PHI increment.
+ auto *CanonicalIV = cast<VPPhi>(&*HeaderVPBB->begin());
+ VPValue *Backedge = CanonicalIV->getIncomingValue(1);
+ assert(match(Backedge,
+ m_c_Binary<Instruction::Add>(m_Specific(CanonicalIV),
+ m_Specific(&Plan.getVFxUF()))) &&
+ "Unexpected canonical iv");
+ Backedge->replaceAllUsesWith(EVLIncrement);
+
+ // Remove unused phi and increment.
+ VPRecipeBase *CanonicalIVIncrement = Backedge->getDefiningRecipe();
+ CanonicalIVIncrement->eraseFromParent();
+ CanonicalIV->eraseFromParent();
+
+ // Replace the use of VectorTripCount in the latch-exiting block.
+ // Before: (branch-on-count EVLIVInc, VectorTripCount)
+ // After: (branch-on-count EVLIVInc, TripCount)
+
+ VPBasicBlock *LatchExiting =
+ HeaderVPBB->getPredecessors()[1]->getEntryBasicBlock();
+ auto *LatchExitingBr = cast<VPInstruction>(LatchExiting->getTerminator());
+ // Skip single-iteration loop region
+ if (match(LatchExitingBr, m_BranchOnCond(m_True())))
+ return;
+ assert(LatchExitingBr &&
+ match(LatchExitingBr,
+ m_BranchOnCount(m_VPValue(EVLIncrement),
+ m_Specific(&Plan.getVectorTripCount()))) &&
+ "Unexpected terminator in EVL loop");
+ LatchExitingBr->setOperand(1, Plan.getTripCount());
+}
+
void VPlanTransforms::dropPoisonGeneratingRecipes(
VPlan &Plan,
const std::function<bool(BasicBlock *)> &BlockNeedsPredication) {
@@ -2403,7 +2474,6 @@ void VPlanTransforms::dropPoisonGeneratingRecipes(
// drop them directly.
if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
VPValue *A, *B;
- using namespace llvm::VPlanPatternMatch;
// Dropping disjoint from an OR may yield incorrect results, as some
// analysis may have converted it to an Add implicitly (e.g. SCEV used
// for dependence analysis). Instead, replace it with an equivalent Add.
@@ -2697,22 +2767,11 @@ void VPlanTransforms::dissolveLoopRegions(VPlan &Plan) {
void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
Type &CanonicalIVTy) {
- using namespace llvm::VPlanPatternMatch;
-
VPTypeAnalysis TypeInfo(&CanonicalIVTy);
SmallVector<VPRecipeBase *> ToRemove;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_deep(Plan.getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
- if (auto *PhiR = dyn_cast<VPEVLBasedIVPHIRecipe>(&R)) {
- auto *ScalarR = VPBuilder(PhiR).createScalarPhi(
- {PhiR->getStartValue(), PhiR->getBackedgeValue()},
- PhiR->getDebugLoc(), "evl.based.iv");
- PhiR->replaceAllUsesWith(ScalarR);
- ToRemove.push_back(PhiR);
- continue;
- }
-
if (auto *WidenIVR = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R)) {
expandVPWidenIntOrFpInduction(WidenIVR, TypeInfo);
ToRemove.push_back(WidenIVR);
@@ -2784,8 +2843,6 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
void VPlanTransforms::handleUncountableEarlyExit(
VPBasicBlock *EarlyExitingVPBB, VPBasicBlock *EarlyExitVPBB, VPlan &Plan,
VPBasicBlock *HeaderVPBB, VPBasicBlock *LatchVPBB, VFRange &Range) {
- using namespace llvm::VPlanPatternMatch;
-
VPBlockBase *MiddleVPBB = LatchVPBB->getSuccessors()[0];
if (!EarlyExitVPBB->getSinglePredecessor() &&
EarlyExitVPBB->getPredecessors()[1] == MiddleVPBB) {
@@ -2879,8 +2936,6 @@ void VPlanTransforms::handleUncountableEarlyExit(
static VPExpressionRecipe *
tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
VFRange &Range) {
- using namespace VPlanPatternMatch;
-
Type *RedTy = Ctx.Types.inferScalarType(Red);
VPValue *VecOp = Red->getVecOp();
@@ -2926,8 +2981,6 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
static VPExpressionRecipe *
tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
VPCostContext &Ctx, VFRange &Range) {
- using namespace VPlanPatternMatch;
-
unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
if (Opcode != Instruction::Add)
return nullptr;
@@ -3141,9 +3194,7 @@ static bool canNarrowLoad(VPWidenRecipe *WideMember0, unsigned OpIdx,
return !W->getMask() && WideMember0->getOperand(OpIdx) == OpV;
if (auto *IR = dyn_cast<VPInterleaveRecipe>(DefR))
- return IR->getInterleaveGroup()->getFactor() ==
- IR->getInterleaveGroup()->getNumMembers() &&
- IR->getVPValue(Idx) == OpV;
+ return IR->getInterleaveGroup()->isFull() && IR->getVPValue(Idx) == OpV;
return false;
}
@@ -3190,7 +3241,6 @@ static bool isAlreadyNarrow(VPValue *VPV) {
void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
unsigned VectorRegWidth) {
- using namespace llvm::VPlanPatternMatch;
VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
if (VF.isScalable() || !VectorLoop)
return;
@@ -3260,9 +3310,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
if (!DefR)
return false;
auto *IR = dyn_cast<VPInterleaveRecipe>(DefR);
- return IR &&
- IR->getInterleaveGroup()->getFactor() ==
- IR->getInterleaveGroup()->getNumMembers() &&
+ return IR && IR->getInterleaveGroup()->isFull() &&
IR->getVPValue(Op.index()) == Op.value();
})) {
StoreGroups.push_back(InterleaveR);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index d5af6cd..880159f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -209,6 +209,18 @@ struct VPlanTransforms {
/// Replace loop regions with explicit CFG.
static void dissolveLoopRegions(VPlan &Plan);
+ /// Transform EVL loops to use variable-length stepping after region
+ /// dissolution.
+ ///
+ /// Once loop regions are replaced with explicit CFG, EVL loops can step with
+ /// variable vector lengths instead of fixed lengths. This transformation:
+ /// * Makes EVL-Phi concrete.
+  /// * Removes CanonicalIV and increment.
+ /// * Replaces fixed-length stepping (branch-on-cond CanonicalIVInc,
+ /// VectorTripCount) with variable-length stepping (branch-on-cond
+ /// EVLIVInc, TripCount).
+ static void canonicalizeEVLLoops(VPlan &Plan);
+
/// Lower abstract recipes to concrete ones, that can be codegen'd. Use \p
/// CanonicalIVTy as type for all un-typed live-ins in VPTypeAnalysis.
static void convertToConcreteRecipes(VPlan &Plan, Type &CanonicalIVTy);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 38ada33..14ae4f2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -17,6 +17,7 @@
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanHelpers.h"
+#include "VPlanPatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/TypeSwitch.h"
@@ -78,9 +79,8 @@ bool VPlanVerifier::verifyPhiRecipes(const VPBasicBlock *VPBB) {
if (isa<VPActiveLaneMaskPHIRecipe>(RecipeI))
NumActiveLaneMaskPhiRecipes++;
- if (IsHeaderVPBB && !isa<VPHeaderPHIRecipe, VPWidenPHIRecipe>(*RecipeI) &&
- !isa<VPInstruction>(*RecipeI) &&
- cast<VPInstruction>(RecipeI)->getOpcode() == Instruction::PHI) {
+ if (IsHeaderVPBB &&
+ !isa<VPHeaderPHIRecipe, VPWidenPHIRecipe, VPPhi>(*RecipeI)) {
errs() << "Found non-header PHI recipe in header VPBB";
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
errs() << ": ";
@@ -171,7 +171,9 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
.Case<VPInstructionWithType>(
[&](const VPInstructionWithType *S) { return VerifyEVLUse(*S, 0); })
.Case<VPInstruction>([&](const VPInstruction *I) {
- if (I->getOpcode() == Instruction::PHI)
+ if (I->getOpcode() == Instruction::PHI ||
+ I->getOpcode() == Instruction::ICmp ||
+ I->getOpcode() == Instruction::Sub)
return VerifyEVLUse(*I, 1);
switch (I->getOpcode()) {
case Instruction::Add:
@@ -192,7 +194,13 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
errs() << "EVL used by unexpected VPInstruction\n";
return false;
}
- if (I->getNumUsers() != 1) {
+ // EVLIVIncrement is only used by EVLIV & BranchOnCount.
+ // Having more than two users is unexpected.
+ if ((I->getNumUsers() != 1) &&
+ (I->getNumUsers() != 2 || none_of(I->users(), [&I](VPUser *U) {
+ using namespace llvm::VPlanPatternMatch;
+ return match(U, m_BranchOnCount(m_Specific(I), m_VPValue()));
+ }))) {
errs() << "EVL is used in VPInstruction with multiple users\n";
return false;
}
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 6252f4f..6345b18 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1664,6 +1664,8 @@ static Align computeAlignmentAfterScalarization(Align VectorAlignment,
// %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
// store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
auto *SI = cast<StoreInst>(&I);
if (!SI->isSimple() || !isa<VectorType>(SI->getValueOperand()->getType()))
return false;
@@ -1719,6 +1721,9 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
/// Try to scalarize vector loads feeding extractelement instructions.
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
+
Value *Ptr;
if (!match(&I, m_Load(m_Value(Ptr))))
return false;
@@ -1827,6 +1832,8 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
}
bool VectorCombine::scalarizeExtExtract(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
auto *Ext = dyn_cast<ZExtInst>(&I);
if (!Ext)
return false;
diff --git a/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll b/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
index 9c2d6bc..adb1a72 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
@@ -1,10 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-SIZE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
@@ -29,119 +26,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.sadd.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.sadd.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @sadd(i32 %arg) {
-; V8M-RECIP-LABEL: 'sadd'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'sadd'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'sadd'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 126 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 218 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 434 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 78 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 154 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 272 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 544 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'sadd'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'sadd'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'sadd'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 145 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 289 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 141 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 264 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 528 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'sadd'
+; V8M-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 128 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'sadd'
+; NEON-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:13 CodeSize:10 Lat:10 SizeLat:10 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:13 CodeSize:10 Lat:10 SizeLat:10 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:14 CodeSize:10 Lat:10 SizeLat:10 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:6 Lat:6 SizeLat:6 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:16 CodeSize:10 Lat:10 SizeLat:10 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'sadd'
+; MVE-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:126 CodeSize:92 Lat:126 SizeLat:126 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:218 CodeSize:145 Lat:218 SizeLat:218 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:434 CodeSize:289 Lat:434 SizeLat:434 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:78 CodeSize:71 Lat:78 SizeLat:78 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:154 CodeSize:141 Lat:154 SizeLat:154 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:142 CodeSize:135 Lat:142 SizeLat:142 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:284 CodeSize:270 Lat:284 SizeLat:284 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:272 CodeSize:264 Lat:272 SizeLat:272 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:544 CodeSize:528 Lat:544 SizeLat:544 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -187,119 +127,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.uadd.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.uadd.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @uadd(i32 %arg) {
-; V8M-RECIP-LABEL: 'uadd'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'uadd'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'uadd'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 288 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 272 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'uadd'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'uadd'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'uadd'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 216 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 132 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 264 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'uadd'
+; V8M-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 128 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'uadd'
+; NEON-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'uadd'
+; MVE-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:72 CodeSize:54 Lat:72 SizeLat:72 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:144 CodeSize:108 Lat:144 SizeLat:144 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.uadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:288 CodeSize:216 Lat:288 SizeLat:288 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.uadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.uadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:36 Lat:40 SizeLat:40 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:80 CodeSize:72 Lat:80 SizeLat:80 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.uadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.uadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:72 CodeSize:68 Lat:72 SizeLat:72 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.uadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:144 CodeSize:136 Lat:144 SizeLat:144 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.uadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.uadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:136 CodeSize:132 Lat:136 SizeLat:136 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.uadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:272 CodeSize:264 Lat:272 SizeLat:272 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.uadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -345,119 +228,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.ssub.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.ssub.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @ssub(i32 %arg) {
-; V8M-RECIP-LABEL: 'ssub'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'ssub'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'ssub'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 126 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 218 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 434 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 78 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 154 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 272 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 544 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'ssub'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'ssub'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'ssub'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 145 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 289 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 141 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 264 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 528 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'ssub'
+; V8M-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 128 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'ssub'
+; NEON-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:13 CodeSize:10 Lat:10 SizeLat:10 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:13 CodeSize:10 Lat:10 SizeLat:10 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:6 Lat:6 SizeLat:6 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:14 CodeSize:10 Lat:10 SizeLat:10 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:6 Lat:6 SizeLat:6 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:16 CodeSize:10 Lat:10 SizeLat:10 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'ssub'
+; MVE-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:126 CodeSize:92 Lat:126 SizeLat:126 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:218 CodeSize:145 Lat:218 SizeLat:218 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:434 CodeSize:289 Lat:434 SizeLat:434 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:78 CodeSize:71 Lat:78 SizeLat:78 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:154 CodeSize:141 Lat:154 SizeLat:154 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:142 CodeSize:135 Lat:142 SizeLat:142 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:284 CodeSize:270 Lat:284 SizeLat:284 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:272 CodeSize:264 Lat:272 SizeLat:272 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:544 CodeSize:528 Lat:544 SizeLat:544 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -503,119 +329,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.usub.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.usub.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @usub(i32 %arg) {
-; V8M-RECIP-LABEL: 'usub'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'usub'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'usub'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 288 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 272 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'usub'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'usub'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'usub'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 216 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 132 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 264 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'usub'
+; V8M-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of 16 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of 32 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 64 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 128 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'usub'
+; NEON-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:4 CodeSize:3 Lat:3 SizeLat:3 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:5 Lat:5 SizeLat:5 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'usub'
+; MVE-NEXT: Cost Model: Found costs of 4 for: %I64 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:72 CodeSize:54 Lat:72 SizeLat:72 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:144 CodeSize:108 Lat:144 SizeLat:144 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.usub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:288 CodeSize:216 Lat:288 SizeLat:288 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.usub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I32 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.usub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:36 Lat:40 SizeLat:40 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:80 CodeSize:72 Lat:80 SizeLat:80 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.usub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:72 CodeSize:68 Lat:72 SizeLat:72 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.usub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:144 CodeSize:136 Lat:144 SizeLat:144 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.usub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:136 CodeSize:132 Lat:136 SizeLat:136 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.usub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:272 CodeSize:264 Lat:272 SizeLat:272 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.usub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -661,119 +430,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.smul.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.smul.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @smul(i32 %arg) {
-; V8M-RECIP-LABEL: 'smul'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 82 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 164 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 328 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 272 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'smul'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 83 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 162 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'smul'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 508 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 1016 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2032 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 872 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2832 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 232 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 624 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 360 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 880 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'smul'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 58 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 114 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 82 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 162 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 82 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 162 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 322 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'smul'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'smul'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 74 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 144 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 150 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 328 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 652 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 282 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 538 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'smul'
+; V8M-NEXT: Cost Model: Found costs of 15 for: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:82 CodeSize:30 Lat:30 SizeLat:30 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:164 CodeSize:56 Lat:56 SizeLat:56 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:328 CodeSize:108 Lat:108 SizeLat:108 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 8 for: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:68 CodeSize:30 Lat:30 SizeLat:30 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:136 CodeSize:58 Lat:58 SizeLat:58 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:272 CodeSize:114 Lat:114 SizeLat:114 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 6 for: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:56 CodeSize:42 Lat:42 SizeLat:42 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:112 CodeSize:82 Lat:82 SizeLat:82 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:224 CodeSize:162 Lat:162 SizeLat:162 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 6 for: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:112 CodeSize:82 Lat:82 SizeLat:82 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:224 CodeSize:162 Lat:162 SizeLat:162 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:448 CodeSize:322 Lat:322 SizeLat:322 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'smul'
+; NEON-NEXT: Cost Model: Found costs of RThru:15 CodeSize:8 Lat:8 SizeLat:8 for: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:83 CodeSize:12 Lat:12 SizeLat:12 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:162 CodeSize:13 Lat:13 SizeLat:13 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:320 CodeSize:15 Lat:15 SizeLat:15 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:6 Lat:6 SizeLat:6 for: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:21 CodeSize:10 Lat:10 SizeLat:10 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:38 CodeSize:11 Lat:11 SizeLat:11 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:72 CodeSize:13 Lat:13 SizeLat:13 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 6 for: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:23 CodeSize:8 Lat:8 SizeLat:8 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:46 CodeSize:9 Lat:9 SizeLat:9 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:92 CodeSize:11 Lat:11 SizeLat:11 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 6 for: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:23 CodeSize:8 Lat:8 SizeLat:8 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:46 CodeSize:9 Lat:9 SizeLat:9 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:92 CodeSize:11 Lat:11 SizeLat:11 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'smul'
+; MVE-NEXT: Cost Model: Found costs of 15 for: %I64 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:508 CodeSize:74 Lat:108 SizeLat:108 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:1016 CodeSize:144 Lat:212 SizeLat:212 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.smul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2032 CodeSize:284 Lat:420 SizeLat:420 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.smul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 8 for: %I32 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:284 CodeSize:150 Lat:152 SizeLat:152 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:872 CodeSize:328 Lat:332 SizeLat:332 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2832 CodeSize:652 Lat:660 SizeLat:660 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.smul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 6 for: %I16 = call { i16, i1 } @llvm.smul.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:64 CodeSize:40 Lat:46 SizeLat:46 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.smul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:232 CodeSize:142 Lat:154 SizeLat:154 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.smul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:624 CodeSize:282 Lat:306 SizeLat:306 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.smul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 6 for: %I8 = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:96 CodeSize:72 Lat:78 SizeLat:78 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.smul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:360 CodeSize:270 Lat:282 SizeLat:282 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.smul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:880 CodeSize:538 Lat:562 SizeLat:562 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.smul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.smul.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.smul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -819,119 +531,62 @@ declare {<32 x i8>, <32 x i1>} @llvm.umul.with.overflow.v32i8(<32 x i8>, <32 x
declare {<64 x i8>, <64 x i1>} @llvm.umul.with.overflow.v64i8(<64 x i8>, <64 x i8>)
define i32 @umul(i32 %arg) {
-; V8M-RECIP-LABEL: 'umul'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 78 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 156 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 312 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 384 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'umul'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 77 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 154 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 308 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 84 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 84 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'umul'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 472 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 944 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 1888 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 186 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 484 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 1288 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 228 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 616 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 356 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 872 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'umul'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 50 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 98 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 130 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 130 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 258 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'umul'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'umul'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 149 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 326 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 648 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 278 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 268 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 534 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'umul'
+; V8M-NEXT: Cost Model: Found costs of 13 for: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:78 CodeSize:26 Lat:26 SizeLat:26 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:156 CodeSize:48 Lat:48 SizeLat:48 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:312 CodeSize:92 Lat:92 SizeLat:92 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of 7 for: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:26 Lat:26 SizeLat:26 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:50 Lat:50 SizeLat:50 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:98 Lat:98 SizeLat:98 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of 5 for: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:48 CodeSize:34 Lat:34 SizeLat:34 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:96 CodeSize:66 Lat:66 SizeLat:66 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:192 CodeSize:130 Lat:130 SizeLat:130 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of 5 for: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:96 CodeSize:66 Lat:66 SizeLat:66 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:192 CodeSize:130 Lat:130 SizeLat:130 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:384 CodeSize:258 Lat:258 SizeLat:258 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
+;
+; NEON-LABEL: 'umul'
+; NEON-NEXT: Cost Model: Found costs of RThru:13 CodeSize:7 Lat:7 SizeLat:7 for: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:77 CodeSize:7 Lat:7 SizeLat:7 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:154 CodeSize:8 Lat:8 SizeLat:8 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:308 CodeSize:10 Lat:10 SizeLat:10 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:7 CodeSize:5 Lat:5 SizeLat:5 for: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:19 CodeSize:9 Lat:9 SizeLat:9 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:34 CodeSize:10 Lat:10 SizeLat:10 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:64 CodeSize:12 Lat:12 SizeLat:12 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 5 for: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:21 CodeSize:7 Lat:7 SizeLat:7 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:42 CodeSize:8 Lat:8 SizeLat:8 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:84 CodeSize:10 Lat:10 SizeLat:10 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 5 for: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:21 CodeSize:7 Lat:7 SizeLat:7 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:42 CodeSize:8 Lat:8 SizeLat:8 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:84 CodeSize:10 Lat:10 SizeLat:10 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
+;
+; MVE-LABEL: 'umul'
+; MVE-NEXT: Cost Model: Found costs of 13 for: %I64 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:472 CodeSize:38 Lat:72 SizeLat:72 for: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:944 CodeSize:72 Lat:140 SizeLat:140 for: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.umul.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:1888 CodeSize:140 Lat:276 SizeLat:276 for: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.umul.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 7 for: %I32 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:186 CodeSize:149 Lat:150 SizeLat:150 for: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:484 CodeSize:326 Lat:328 SizeLat:328 for: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:1288 CodeSize:648 Lat:652 SizeLat:652 for: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.umul.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 5 for: %I16 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:62 CodeSize:39 Lat:44 SizeLat:44 for: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.umul.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:228 CodeSize:140 Lat:150 SizeLat:150 for: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.umul.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:616 CodeSize:278 Lat:298 SizeLat:298 for: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.umul.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 5 for: %I8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:94 CodeSize:71 Lat:76 SizeLat:76 for: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:356 CodeSize:268 Lat:278 SizeLat:278 for: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.umul.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:872 CodeSize:534 Lat:554 SizeLat:554 for: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.umul.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 undef, i64 undef)
%V2I64 = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
diff --git a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
index 34397bd..5d227a8 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
@@ -1,10 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-SIZE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
@@ -35,155 +32,80 @@ declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8>, <64 x i8>)
define i32 @add(i32 %arg) {
-; V8M-RECIP-LABEL: 'add'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 60 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 512 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'add'
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:30 CodeSize:34 Lat:30 SizeLat:30 for: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:60 CodeSize:68 Lat:60 SizeLat:60 for: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:120 CodeSize:136 Lat:120 SizeLat:120 for: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:512 CodeSize:640 Lat:512 SizeLat:512 for: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
;
-; NEON-RECIP-LABEL: 'add'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; NEON-LABEL: 'add'
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:7 Lat:7 SizeLat:7 for: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
-; MVE-RECIP-LABEL: 'add'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 166 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 298 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 594 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'add'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 640 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'add'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'add'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 185 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 369 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; MVE-LABEL: 'add'
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:166 CodeSize:112 Lat:166 SizeLat:166 for: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:298 CodeSize:185 Lat:298 SizeLat:298 for: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:594 CodeSize:369 Lat:594 SizeLat:594 for: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 1 for: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I32 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I32 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I16 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I16 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V8I16 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I16 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V32I16 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I8 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I8 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I8 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V16I8 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V32I8 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V64I8 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
%V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -241,155 +163,80 @@ declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8>, <64 x i8>)
define i32 @sub(i32 %arg) {
-; V8M-RECIP-LABEL: 'sub'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 60 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 512 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'sub'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'sub'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 166 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 298 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 594 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 100 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'sub'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 640 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'sub'
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:30 CodeSize:34 Lat:30 SizeLat:30 for: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:60 CodeSize:68 Lat:60 SizeLat:60 for: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:120 CodeSize:136 Lat:120 SizeLat:120 for: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:512 CodeSize:640 Lat:512 SizeLat:512 for: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
;
-; NEON-SIZE-LABEL: 'sub'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; NEON-LABEL: 'sub'
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:7 Lat:7 SizeLat:7 for: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
-; MVE-SIZE-LABEL: 'sub'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 185 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 369 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; MVE-LABEL: 'sub'
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:166 CodeSize:112 Lat:166 SizeLat:166 for: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:298 CodeSize:185 Lat:298 SizeLat:298 for: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:594 CodeSize:369 Lat:594 SizeLat:594 for: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of 1 for: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I32 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I32 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I16 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I16 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V8I16 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I16 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V32I16 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:100 CodeSize:70 Lat:100 SizeLat:100 for: %V2I8 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I8 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I8 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V16I8 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V32I8 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V64I8 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
%V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
diff --git a/llvm/test/Analysis/CostModel/ARM/arith-usat.ll b/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
index 28e4860..c843331 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-usat.ll
@@ -1,10 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-RECIP
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON-SIZE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE-SIZE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8m.main < %s | FileCheck %s --check-prefix=V8M
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8a-linux-gnueabihf < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=armv8.1m.main -mattr=+mve < %s | FileCheck %s --check-prefix=MVE
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
@@ -35,155 +32,80 @@ declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>)
define i32 @add(i32 %arg) {
-; V8M-RECIP-LABEL: 'add'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'add'
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:18 Lat:16 SizeLat:16 for: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:36 Lat:32 SizeLat:32 for: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:72 Lat:64 SizeLat:64 for: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
;
-; NEON-RECIP-LABEL: 'add'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; NEON-LABEL: 'add'
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:7 Lat:7 SizeLat:7 for: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 5 for: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
-; MVE-RECIP-LABEL: 'add'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 74 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 148 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 296 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'add'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-SIZE-LABEL: 'add'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; MVE-SIZE-LABEL: 'add'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 55 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 110 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 220 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; MVE-LABEL: 'add'
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:74 CodeSize:55 Lat:74 SizeLat:74 for: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:148 CodeSize:110 Lat:148 SizeLat:148 for: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:296 CodeSize:220 Lat:296 SizeLat:296 for: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I32 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I16 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I16 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I8 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I8 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I8 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
%V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
@@ -241,155 +163,80 @@ declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>)
define i32 @sub(i32 %arg) {
-; V8M-RECIP-LABEL: 'sub'
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 128 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 256 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
-;
-; NEON-RECIP-LABEL: 'sub'
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; MVE-RECIP-LABEL: 'sub'
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 74 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 148 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 296 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
-;
-; V8M-SIZE-LABEL: 'sub'
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; V8M-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; V8M-LABEL: 'sub'
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:18 Lat:16 SizeLat:16 for: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:36 Lat:32 SizeLat:32 for: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:72 Lat:64 SizeLat:64 for: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:16 CodeSize:20 Lat:16 SizeLat:16 for: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:32 CodeSize:40 Lat:32 SizeLat:32 for: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:64 CodeSize:80 Lat:64 SizeLat:64 for: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:128 CodeSize:160 Lat:128 SizeLat:128 for: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of RThru:256 CodeSize:320 Lat:256 SizeLat:256 for: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; V8M-NEXT: Cost Model: Found costs of 1 for: ret i32 undef
;
-; NEON-SIZE-LABEL: 'sub'
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; NEON-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; NEON-LABEL: 'sub'
+; NEON-NEXT: Cost Model: Found costs of RThru:8 CodeSize:7 Lat:7 SizeLat:7 for: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; NEON-NEXT: Cost Model: Found costs of 5 for: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; NEON-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 1 for: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 4 for: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of 8 for: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; NEON-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
-; MVE-SIZE-LABEL: 'sub'
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 55 for instruction: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 110 for instruction: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 220 for instruction: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 29 for instruction: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
-; MVE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 undef
+; MVE-LABEL: 'sub'
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:10 Lat:8 SizeLat:8 for: %I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:74 CodeSize:55 Lat:74 SizeLat:74 for: %V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:148 CodeSize:110 Lat:148 SizeLat:148 for: %V4I64 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:296 CodeSize:220 Lat:296 SizeLat:296 for: %V8I64 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %I32 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I32 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V4I32 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V8I32 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V16I32 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I16 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I16 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I16 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V8I16 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V16I16 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V32I16 = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-NEXT: Cost Model: Found costs of 2 for: %I8 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:40 CodeSize:29 Lat:40 SizeLat:40 for: %V2I8 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V4I8 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V8I8 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %V16I8 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %V32I8 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %V64I8 = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret i32 undef
;
%I64 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
%V2I64 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index 602ef5c..23572898 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -424,250 +424,328 @@ define void @rint_fp16() {
define void @lrint() {
; CHECK-LABEL: 'lrint'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.lrint.i64.bf16(bfloat undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %2 = call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %3 = call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %4 = call <8 x i64> @llvm.lrint.v8i64.v8bf16(<8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %5 = call <16 x i64> @llvm.lrint.v16i64.v16bf16(<16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.lrint.i64.f32(float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %20 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.lrint.i64.f64(double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i32 @llvm.lrint.i32.bf16(bfloat poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i32> @llvm.lrint.v2i32.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i32> @llvm.lrint.v4i32.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x i32> @llvm.lrint.v8i32.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <16 x i32> @llvm.lrint.v16i32.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %10 = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i32 @llvm.lrint.i32.f32(float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i32> @llvm.lrint.v4i32.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x i32> @llvm.lrint.v8i32.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %15 = call <16 x i32> @llvm.lrint.v16i32.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %19 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lrint.i32.f64(double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lrint.v2i32.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call i32 @llvm.lrint.i32.bf16(bfloat poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %32 = call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %33 = call <8 x i64> @llvm.lrint.v8i64.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %34 = call <16 x i64> @llvm.lrint.v16i64.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %36 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %37 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %38 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %39 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %40 = call i64 @llvm.lrint.i64.f32(float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %41 = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %42 = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %43 = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %44 = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %45 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %46 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %47 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %48 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %49 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %50 = call i64 @llvm.lrint.i64.f64(double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %51 = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %52 = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %53 = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %54 = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %55 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %56 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %57 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %58 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call i64 @llvm.lrint.i64.bf16(bfloat undef)
- call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> undef)
- call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> undef)
- call <8 x i64> @llvm.lrint.v8i64.v8bf16(<8 x bfloat> undef)
- call <16 x i64> @llvm.lrint.v16i64.v16bf16(<16 x bfloat> undef)
- call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> undef)
- call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> undef)
- call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> undef)
- call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> undef)
- call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> undef)
- call i64 @llvm.lrint.i64.f32(float undef)
- call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> undef)
- call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
- call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
- call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
- call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
- call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
- call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
- call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
- call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
- call i64 @llvm.lrint.i64.f64(double undef)
- call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
- call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef)
- call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef)
- call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> undef)
- call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
- call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
- call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
- call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
+ call i32 @llvm.lrint.i32.bf16(bfloat poison)
+ call <2 x i32> @llvm.lrint.v2i32.v2bf16(<2 x bfloat> poison)
+ call <4 x i32> @llvm.lrint.v4i32.v4bf16(<4 x bfloat> poison)
+ call <8 x i32> @llvm.lrint.v8i32.v8bf16(<8 x bfloat> poison)
+ call <16 x i32> @llvm.lrint.v16i32.v16bf16(<16 x bfloat> poison)
+ call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1bf16(<vscale x 1 x bfloat> poison)
+ call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> poison)
+ call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4bf16(<vscale x 4 x bfloat> poison)
+ call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8bf16(<vscale x 8 x bfloat> poison)
+ call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16bf16(<vscale x 16 x bfloat> poison)
+ call i32 @llvm.lrint.i32.f32(float poison)
+ call <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float> poison)
+ call <4 x i32> @llvm.lrint.v4i32.v4f32(<4 x float> poison)
+ call <8 x i32> @llvm.lrint.v8i32.v8f32(<8 x float> poison)
+ call <16 x i32> @llvm.lrint.v16i32.v16f32(<16 x float> poison)
+ call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f32(<vscale x 1 x float> poison)
+ call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f32(<vscale x 2 x float> poison)
+ call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f32(<vscale x 4 x float> poison)
+ call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f32(<vscale x 8 x float> poison)
+ call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
+ call i32 @llvm.lrint.i32.f64(double poison)
+ call <2 x i32> @llvm.lrint.v2i32.v2f64(<2 x double> poison)
+ call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
+ call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
+ call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
+ call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
+ call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+ call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+ call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+ call i32 @llvm.lrint.i32.bf16(bfloat poison)
+ call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> poison)
+ call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> poison)
+ call <8 x i64> @llvm.lrint.v8i64.v8bf16(<8 x bfloat> poison)
+ call <16 x i64> @llvm.lrint.v16i64.v16bf16(<16 x bfloat> poison)
+ call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+ call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+ call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+ call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+ call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+ call i64 @llvm.lrint.i64.f32(float poison)
+ call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> poison)
+ call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> poison)
+ call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> poison)
+ call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> poison)
+ call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+ call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+ call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+ call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+ call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+ call i64 @llvm.lrint.i64.f64(double poison)
+ call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> poison)
+ call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> poison)
+ call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> poison)
+ call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> poison)
+ call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+ call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+ call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+ call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
ret void
}
define void @lrint_fp16() {
; CHECK-LABEL: 'lrint_fp16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.lrint.i64.f16(half undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %2 = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %3 = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %4 = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %5 = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i32 @llvm.lrint.i32.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i32> @llvm.lrint.v2i32.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i32> @llvm.lrint.v4i32.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x i32> @llvm.lrint.v8i32.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <16 x i32> @llvm.lrint.v16i32.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %10 = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f16(<vscale x 16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i32 @llvm.lrint.i32.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call i64 @llvm.lrint.f16(half undef)
- call <2 x i64> @llvm.lrint.v2f16(<2 x half> undef)
- call <4 x i64> @llvm.lrint.v4f16(<4 x half> undef)
- call <8 x i64> @llvm.lrint.v8f16(<8 x half> undef)
- call <16 x i64> @llvm.lrint.v16f16(<16 x half> undef)
- call <vscale x 1 x i64> @llvm.lrint.nxv1f16(<vscale x 1 x half> undef)
- call <vscale x 2 x i64> @llvm.lrint.nxv2f16(<vscale x 2 x half> undef)
- call <vscale x 4 x i64> @llvm.lrint.nxv4f16(<vscale x 4 x half> undef)
- call <vscale x 8 x i64> @llvm.lrint.nxv8f16(<vscale x 8 x half> undef)
- call <vscale x 16 x i64> @llvm.lrint.nxv16f16(<vscale x 16 x half> undef)
+ call i32 @llvm.lrint.f16(half poison)
+ call <2 x i32> @llvm.lrint.v2f16(<2 x half> poison)
+ call <4 x i32> @llvm.lrint.v4f16(<4 x half> poison)
+ call <8 x i32> @llvm.lrint.v8f16(<8 x half> poison)
+ call <16 x i32> @llvm.lrint.v16f16(<16 x half> poison)
+ call <vscale x 1 x i32> @llvm.lrint.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i32> @llvm.lrint.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i32> @llvm.lrint.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i32> @llvm.lrint.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i32> @llvm.lrint.nxv16f16(<vscale x 16 x half> poison)
+ call i32 @llvm.lrint.f16(half poison)
+ call <2 x i64> @llvm.lrint.v2f16(<2 x half> poison)
+ call <4 x i64> @llvm.lrint.v4f16(<4 x half> poison)
+ call <8 x i64> @llvm.lrint.v8f16(<8 x half> poison)
+ call <16 x i64> @llvm.lrint.v16f16(<16 x half> poison)
+ call <vscale x 1 x i64> @llvm.lrint.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i64> @llvm.lrint.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i64> @llvm.lrint.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i64> @llvm.lrint.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i64> @llvm.lrint.nxv16f16(<vscale x 16 x half> poison)
ret void
}
define void @llrint() {
; CHECK-LABEL: 'llrint'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.bf16(bfloat undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %4 = call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llrint.i64.f32(float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %20 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.llrint.i64.f64(double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.bf16(bfloat poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llrint.i64.f32(float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.llrint.i64.f64(double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call i64 @llvm.llrint.i64.bf16(bfloat undef)
- call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat> undef)
- call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> undef)
- call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> undef)
- call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> undef)
- call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> undef)
- call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> undef)
- call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> undef)
- call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> undef)
- call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> undef)
- call i64 @llvm.llrint.i64.f32(float undef)
- call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> undef)
- call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef)
- call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef)
- call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef)
- call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
- call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
- call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
- call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
- call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
- call i64 @llvm.llrint.i64.f64(double undef)
- call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> undef)
- call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef)
- call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef)
- call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef)
- call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
- call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
- call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
- call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
+ call i64 @llvm.llrint.i64.bf16(bfloat poison)
+ call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat> poison)
+ call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> poison)
+ call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> poison)
+ call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> poison)
+ call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+ call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+ call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+ call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+ call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+ call i64 @llvm.llrint.i64.f32(float poison)
+ call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> poison)
+ call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> poison)
+ call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> poison)
+ call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> poison)
+ call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+ call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+ call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+ call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+ call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+ call i64 @llvm.llrint.i64.f64(double poison)
+ call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> poison)
+ call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> poison)
+ call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> poison)
+ call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> poison)
+ call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+ call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+ call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+ call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
ret void
}
define void @llrint_fp16() {
; CHECK-LABEL: 'llrint_fp16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.f16(half undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %4 = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call i64 @llvm.llrint.f16(half undef)
- call <2 x i64> @llvm.llrint.v2f16(<2 x half> undef)
- call <4 x i64> @llvm.llrint.v4f16(<4 x half> undef)
- call <8 x i64> @llvm.llrint.v8f16(<8 x half> undef)
- call <16 x i64> @llvm.llrint.v16f16(<16 x half> undef)
- call <vscale x 1 x i64> @llvm.llrint.nxv1f16(<vscale x 1 x half> undef)
- call <vscale x 2 x i64> @llvm.llrint.nxv2f16(<vscale x 2 x half> undef)
- call <vscale x 4 x i64> @llvm.llrint.nxv4f16(<vscale x 4 x half> undef)
- call <vscale x 8 x i64> @llvm.llrint.nxv8f16(<vscale x 8 x half> undef)
- call <vscale x 16 x i64> @llvm.llrint.nxv16f16(<vscale x 16 x half> undef)
+ call i64 @llvm.llrint.f16(half poison)
+ call <2 x i64> @llvm.llrint.v2f16(<2 x half> poison)
+ call <4 x i64> @llvm.llrint.v4f16(<4 x half> poison)
+ call <8 x i64> @llvm.llrint.v8f16(<8 x half> poison)
+ call <16 x i64> @llvm.llrint.v16f16(<16 x half> poison)
+ call <vscale x 1 x i64> @llvm.llrint.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i64> @llvm.llrint.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i64> @llvm.llrint.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i64> @llvm.llrint.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i64> @llvm.llrint.nxv16f16(<vscale x 16 x half> poison)
ret void
}
define void @lround() {
; CHECK-LABEL: 'lround'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i32 @llvm.lround.i32.bf16(bfloat poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i32> @llvm.lround.v2i32.v2bf16(<2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i32> @llvm.lround.v4i32.v4bf16(<4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %4 = call <8 x i32> @llvm.lround.v8i32.v8bf16(<8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %5 = call <16 x i32> @llvm.lround.v16i32.v16bf16(<16 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1bf16(<vscale x 1 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4bf16(<vscale x 4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8bf16(<vscale x 8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i32> @llvm.lround.v2i32.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i32> @llvm.lround.v4i32.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x i32> @llvm.lround.v8i32.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <16 x i32> @llvm.lround.v16i32.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %10 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16bf16(<vscale x 16 x bfloat> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i32 @llvm.lround.i32.f32(float poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %12 = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %13 = call <4 x i32> @llvm.lround.v4i32.v4f32(<4 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %14 = call <8 x i32> @llvm.lround.v8i32.v8f32(<8 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %15 = call <16 x i32> @llvm.lround.v16i32.v16f32(<16 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f32(<vscale x 1 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f32(<vscale x 2 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f32(<vscale x 4 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %19 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f32(<vscale x 8 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %20 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i32> @llvm.lround.v4i32.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x i32> @llvm.lround.v8i32.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %15 = call <16 x i32> @llvm.lround.v16i32.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %19 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lround.i32.f64(double poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %22 = call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %30 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f64(<vscale x 16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f64(<vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call i64 @llvm.lround.i64.bf16(bfloat poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %32 = call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %33 = call <4 x i64> @llvm.lround.v4i64.v4bf16(<4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %34 = call <8 x i64> @llvm.lround.v8i64.v8bf16(<8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %35 = call <16 x i64> @llvm.lround.v16i64.v16bf16(<16 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %36 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %37 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %38 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %39 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %40 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.lround.v4i64.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.lround.v8i64.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.lround.v16i64.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %40 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %41 = call i64 @llvm.lround.i64.f32(float poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %42 = call <2 x i64> @llvm.lround.v2i64.v2f32(<2 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %43 = call <4 x i64> @llvm.lround.v4i64.v4f32(<4 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %44 = call <8 x i64> @llvm.lround.v8i64.v8f32(<8 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %45 = call <16 x i64> @llvm.lround.v16i64.v16f32(<16 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %46 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %47 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %48 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %49 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %50 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %42 = call <2 x i64> @llvm.lround.v2i64.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %43 = call <4 x i64> @llvm.lround.v4i64.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %44 = call <8 x i64> @llvm.lround.v8i64.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %45 = call <16 x i64> @llvm.lround.v16i64.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %46 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %47 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %48 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %49 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %50 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %51 = call i64 @llvm.lround.i64.f64(double poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %52 = call <2 x i64> @llvm.lround.v2i64.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %53 = call <4 x i64> @llvm.lround.v4i64.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %54 = call <8 x i64> @llvm.lround.v8i64.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %55 = call <16 x i64> @llvm.lround.v16i64.v16f64(<16 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %56 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %57 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %58 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %59 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %60 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16f64(<vscale x 16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %52 = call <2 x i64> @llvm.lround.v2i64.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %53 = call <4 x i64> @llvm.lround.v4i64.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %54 = call <8 x i64> @llvm.lround.v8i64.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %55 = call <16 x i64> @llvm.lround.v16i64.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %56 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %57 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %58 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %59 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %60 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16f64(<vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i32 @llvm.lround.i32.bf16(bfloat poison)
@@ -733,38 +811,85 @@ define void @lround() {
ret void
}
+define void @lround_fp16() {
+; CHECK-LABEL: 'lround_fp16'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i32 @llvm.lround.i32.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i32> @llvm.lround.v2i32.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i32> @llvm.lround.v4i32.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x i32> @llvm.lround.v8i32.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <16 x i32> @llvm.lround.v16i32.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %10 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f16(<vscale x 16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i32 @llvm.lround.i32.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.lround.v2i64.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.lround.v4i64.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.lround.v8i64.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.lround.v16i64.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i64> @llvm.lround.nxv2i64.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x i64> @llvm.lround.nxv4i64.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x i64> @llvm.lround.nxv8i64.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x i64> @llvm.lround.nxv16i64.nxv16f16(<vscale x 16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call i32 @llvm.lround.f16(half poison)
+ call <2 x i32> @llvm.lround.v2f16(<2 x half> poison)
+ call <4 x i32> @llvm.lround.v4f16(<4 x half> poison)
+ call <8 x i32> @llvm.lround.v8f16(<8 x half> poison)
+ call <16 x i32> @llvm.lround.v16f16(<16 x half> poison)
+ call <vscale x 1 x i32> @llvm.lround.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i32> @llvm.lround.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i32> @llvm.lround.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i32> @llvm.lround.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i32> @llvm.lround.nxv16f16(<vscale x 16 x half> poison)
+ call i32 @llvm.lround.f16(half poison)
+ call <2 x i64> @llvm.lround.v2f16(<2 x half> poison)
+ call <4 x i64> @llvm.lround.v4f16(<4 x half> poison)
+ call <8 x i64> @llvm.lround.v8f16(<8 x half> poison)
+ call <16 x i64> @llvm.lround.v16f16(<16 x half> poison)
+ call <vscale x 1 x i64> @llvm.lround.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i64> @llvm.lround.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i64> @llvm.lround.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i64> @llvm.lround.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i64> @llvm.lround.nxv16f16(<vscale x 16 x half> poison)
+ ret void
+}
+
define void @llround() {
; CHECK-LABEL: 'llround'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llround.i64.bf16(bfloat poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i64> @llvm.llround.v2i64.v2bf16(<2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %3 = call <4 x i64> @llvm.llround.v4i64.v4bf16(<4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %4 = call <8 x i64> @llvm.llround.v8i64.v8bf16(<8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %5 = call <16 x i64> @llvm.llround.v16i64.v16bf16(<16 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llround.v2i64.v2bf16(<2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i64> @llvm.llround.v4i64.v4bf16(<4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <8 x i64> @llvm.llround.v8i64.v8bf16(<8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %5 = call <16 x i64> @llvm.llround.v16i64.v16bf16(<16 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %8 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %9 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %10 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llround.i64.f32(float poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %12 = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %13 = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %14 = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %15 = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %16 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %17 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %18 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %19 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %20 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f32(<vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f32(<vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f32(<vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f32(<vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i64 @llvm.llround.i64.f64(double poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %22 = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %23 = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %24 = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %25 = call <16 x i64> @llvm.llround.v16i64.v16f64(<16 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %26 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %27 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %28 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %29 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %30 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f64(<vscale x 16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i64> @llvm.llround.v16i64.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f64(<vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f64(<vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i64 @llvm.llround.i64.bf16(bfloat poison)
@@ -800,6 +925,33 @@ define void @llround() {
ret void
}
+define void @llround_fp16() {
+; CHECK-LABEL: 'llround_fp16'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llround.i64.f16(half poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %5 = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f16(<vscale x 1 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f16(<vscale x 2 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %8 = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f16(<vscale x 4 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %9 = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f16(<vscale x 8 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %10 = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f16(<vscale x 16 x half> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call i64 @llvm.llround.f16(half poison)
+ call <2 x i64> @llvm.llround.v2f16(<2 x half> poison)
+ call <4 x i64> @llvm.llround.v4f16(<4 x half> poison)
+ call <8 x i64> @llvm.llround.v8f16(<8 x half> poison)
+ call <16 x i64> @llvm.llround.v16f16(<16 x half> poison)
+ call <vscale x 1 x i64> @llvm.llround.nxv1f16(<vscale x 1 x half> poison)
+ call <vscale x 2 x i64> @llvm.llround.nxv2f16(<vscale x 2 x half> poison)
+ call <vscale x 4 x i64> @llvm.llround.nxv4f16(<vscale x 4 x half> poison)
+ call <vscale x 8 x i64> @llvm.llround.nxv8f16(<vscale x 8 x half> poison)
+ call <vscale x 16 x i64> @llvm.llround.nxv16f16(<vscale x 16 x half> poison)
+ ret void
+}
+
define void @nearbyint() {
; CHECK-LABEL: 'nearbyint'
; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call bfloat @llvm.nearbyint.bf16(bfloat undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll b/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
index 68c89c3..892277a 100644
--- a/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/masked_ldst.ll
@@ -13,14 +13,14 @@ define void @fixed() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i32 = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr undef, i32 8, <2 x i1> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr undef, i32 8, <4 x i1> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i64 = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr undef, i32 8, <2 x i1> undef, <2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = call <2 x half> @llvm.masked.load.v2f16.p0(ptr undef, i32 8, <2 x i1> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = call <4 x half> @llvm.masked.load.v4f16.p0(ptr undef, i32 8, <4 x i1> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f16 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr undef, i32 8, <8 x i1> undef, <8 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v2f16 = call <2 x half> @llvm.masked.load.v2f16.p0(ptr undef, i32 8, <2 x i1> undef, <2 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %v4f16 = call <4 x half> @llvm.masked.load.v4f16.p0(ptr undef, i32 8, <4 x i1> undef, <4 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %v8f16 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr undef, i32 8, <8 x i1> undef, <8 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = call <2 x float> @llvm.masked.load.v2f32.p0(ptr undef, i32 8, <2 x i1> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f32 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr undef, i32 8, <4 x i1> undef, <4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f64 = call <2 x double> @llvm.masked.load.v2f64.p0(ptr undef, i32 8, <2 x i1> undef, <2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i64 = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr undef, i32 8, <4 x i1> undef, <4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v32f16 = call <32 x half> @llvm.masked.load.v32f16.p0(ptr undef, i32 8, <32 x i1> undef, <32 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 223 for instruction: %v32f16 = call <32 x half> @llvm.masked.load.v32f16.p0(ptr undef, i32 8, <32 x i1> undef, <32 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
entry:
diff --git a/llvm/test/Analysis/ScalarEvolution/zext-add.ll b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
new file mode 100644
index 0000000..a08feef
--- /dev/null
+++ b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<scalar-evolution>' -disable-output %s 2>&1 | FileCheck %s
+
+declare i1 @cond()
+
+define void @test_push_constant_into_zext(ptr %dst, ptr %src, i32 %n, i64 %offset) {
+; CHECK-LABEL: 'test_push_constant_into_zext'
+; CHECK-NEXT: Classifying expressions for: @test_push_constant_into_zext
+; CHECK-NEXT: %outer.ptr = phi ptr [ %src, %entry ], [ %ptr.iv.next, %inner.loop ]
+; CHECK-NEXT: --> %outer.ptr U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %outer.loop: Variant, %inner.loop: Invariant }
+; CHECK-NEXT: %c = call i1 @cond()
+; CHECK-NEXT: --> %c U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %outer.loop: Variant, %inner.loop: Invariant }
+; CHECK-NEXT: %iv = phi i32 [ 0, %outer.loop ], [ %iv.next, %inner.loop ]
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%inner.loop> U: [0,2147483647) S: [0,2147483647) Exits: (-1 + (1 smax %n))<nsw> LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: %ptr.iv = phi ptr [ %src, %outer.loop ], [ %ptr.iv.next, %inner.loop ]
+; CHECK-NEXT: --> {%src,+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((zext i32 (-1 + (1 smax %n))<nsw> to i64) * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: %l = load i8, ptr %outer.ptr, align 1
+; CHECK-NEXT: --> %l U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner.loop: Variant, %outer.loop: Variant }
+; CHECK-NEXT: %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 %offset
+; CHECK-NEXT: --> {(%offset + %src),+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((zext i32 (1 smax %n) to i64) * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: %iv.next = add i32 %iv, 1
+; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%inner.loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (1 smax %n) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: Determining loop execution counts for: @test_push_constant_into_zext
+; CHECK-NEXT: Loop %inner.loop: backedge-taken count is (-1 + (1 smax %n))<nsw>
+; CHECK-NEXT: Loop %inner.loop: constant max backedge-taken count is i32 2147483646
+; CHECK-NEXT: Loop %inner.loop: symbolic max backedge-taken count is (-1 + (1 smax %n))<nsw>
+; CHECK-NEXT: Loop %inner.loop: Trip multiple is 1
+; CHECK-NEXT: Loop %outer.loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %outer.loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %outer.loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %outer.loop
+
+outer.loop:
+ %outer.ptr = phi ptr [ %src, %entry ], [ %ptr.iv.next, %inner.loop ]
+ %c = call i1 @cond()
+ br i1 %c, label %inner.loop, label %exit
+
+inner.loop:
+ %iv = phi i32 [ 0, %outer.loop ], [ %iv.next, %inner.loop ]
+ %ptr.iv = phi ptr [ %src, %outer.loop ], [ %ptr.iv.next, %inner.loop ]
+ %l = load i8, ptr %outer.ptr, align 1
+ %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 %offset
+ store i8 %l, ptr %dst, align 2
+ %iv.next = add i32 %iv, 1
+ %ec = icmp slt i32 %iv.next, %n
+ br i1 %ec, label %inner.loop, label %outer.loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
index f0d9aa4..639b6fd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
@@ -20,8 +20,8 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) {
; O0-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
; O0-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
; O0-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; O0-NEXT: %11:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
- ; O0-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %11(p0) :: (load (s32) from %ir.gep2)
+ ; O0-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; O0-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2)
; O0-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; O0-NEXT: $w0 = COPY [[ADD]](s32)
; O0-NEXT: RET_ReallyLR implicit $w0
@@ -39,8 +39,8 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) {
; O3-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; O3-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1)
; O3-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; O3-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
- ; O3-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %9(p0) :: (load (s32) from %ir.gep2)
+ ; O3-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+ ; O3-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.gep2)
; O3-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; O3-NEXT: $w0 = COPY [[ADD]](s32)
; O3-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
index 3b12885..79b2e2e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
@@ -795,8 +795,8 @@ define void @jt_multiple_jump_tables(ptr %arg, i32 %arg1, ptr %arg2) {
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[PHI]], [[C111]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64)
; CHECK-NEXT: [[C112:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: %120:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD]], [[C112]](s64)
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD %120(p0) :: (load (p0) from %ir.tmp59)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[C112]](s64)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from %ir.tmp59)
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK-NEXT: $x0 = COPY [[COPY]](p0)
; CHECK-NEXT: $x1 = COPY [[LOAD]](p0)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index d4574187..675c953 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -599,10 +599,10 @@ define ptr @test_constant_null() {
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr + 4)
; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store (s8) into %ir.addr, align 4)
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store (s32) into %ir.addr + 4)
define void @test_struct_memops(ptr %addr) {
%val = load { i8, i32 }, ptr %addr
@@ -706,7 +706,7 @@ define float @test_frem(float %arg1, float %arg2) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
define void @test_sadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
@@ -722,7 +722,7 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
define void @test_uadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
@@ -738,7 +738,7 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
define void @test_ssub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
@@ -754,7 +754,7 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
define void @test_usub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
@@ -770,7 +770,7 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
define void @test_smul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
@@ -786,7 +786,7 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
define void @test_umul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
@@ -799,13 +799,13 @@ define void @test_umul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: $w0 = COPY [[LD3]](s32)
%struct.nested = type {i8, { i8, i32 }, i32}
@@ -820,16 +820,16 @@ define i32 @test_extractvalue(ptr %addr) {
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store (s8) into %ir.addr2, align 4)
-; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
+; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %1, [[CST1]](s64)
; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store (s32) into %ir.addr2 + 4)
define void @test_extractvalue_agg(ptr %addr, ptr %addr2) {
%struct = load %struct.nested, ptr %addr
@@ -854,20 +854,20 @@ define void @test_trivial_extract_ptr([1 x ptr] %s, i8 %val) {
; CHECK: %1:_(s32) = COPY $w1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
-; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store (s8) into %ir.addr + 4, align 4)
-; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
+; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64)
; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store (s32) into %ir.addr + 8)
-; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 12)
define void @test_insertvalue(ptr %addr, i32 %val) {
%struct = load %struct.nested, ptr %addr
@@ -899,23 +899,23 @@ define [1 x ptr] @test_trivial_insert_ptr([1 x ptr] %s, ptr %val) {
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load (s8) from %ir.addr2, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %1, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr2 + 4)
; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
+; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
-; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store (s8) into %ir.addr + 4, align 4)
-; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 8)
-; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
+; CHECK: [[GEP7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST4]](s64)
; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store (s32) into %ir.addr + 12)
define void @test_insertvalue_agg(ptr %addr, ptr %addr2) {
%smallstruct = load {i8, i32}, ptr %addr2
@@ -1905,19 +1905,19 @@ define void @test_phi_diamond(ptr %a.ptr, ptr %b.ptr, i1 %selector, ptr %dst) {
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load (s8) from %ir.a.ptr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG1]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load (s16) from %ir.a.ptr + 2)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG1]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.a.ptr + 4)
; CHECK: G_BR %bb.4
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load (s8) from %ir.b.ptr, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG2]], [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load (s16) from %ir.b.ptr + 2)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST4]](s64)
+; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG2]], [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.b.ptr + 4)
; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3
@@ -1925,10 +1925,10 @@ define void @test_phi_diamond(ptr %a.ptr, ptr %b.ptr, i1 %selector, ptr %dst) {
; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3
; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store (s8) into %ir.dst, align 4)
; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
-; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST5]](s64)
+; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG4]], [[CST5]](s64)
; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store (s16) into %ir.dst + 2)
; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST6]](s64)
+; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG4]], [[CST6]](s64)
; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store (s32) into %ir.dst + 4)
; CHECK: RET_ReallyLR
@@ -1964,22 +1964,22 @@ define void @test_nested_aggregate_const(ptr %ptr) {
; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store (s32) into %ir.ptr, align 8)
; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST7]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST7]](s64)
; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store (s32) into %ir.ptr + 4)
; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST8]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST8]](s64)
; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store (s16) into %ir.ptr + 8, align 8)
; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST9]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST9]](s64)
; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store (s8) into %ir.ptr + 10, align 2)
; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST10]](s64)
+; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST10]](s64)
; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store (s64) into %ir.ptr + 16)
; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST11]](s64)
+; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST11]](s64)
; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store (s64) into %ir.ptr + 24)
; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST12]](s64)
+; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST12]](s64)
; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store (s32) into %ir.ptr + 32, align 8)
store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, ptr %ptr
ret void
@@ -2519,7 +2519,7 @@ define {i8, i32} @test_freeze_struct(ptr %addr) {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]]
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]]
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0)
; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s8) = G_FREEZE [[LOAD]]
; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[LOAD1]]
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
index 2779e89..4a85d84 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
@@ -12,7 +12,7 @@ define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -46,13 +46,13 @@ define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) {
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $w1, $x0, $x2
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def $x9, pcsections !0 :: (load (s32) from %ir.pnew)
+ ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def renamable $x9, pcsections !0 :: (load (s32) from %ir.pnew)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.cmpxchg.start:
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0, $x9
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -91,7 +91,7 @@ define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -243,7 +243,7 @@ define i32 @fetch_and_nand(ptr %p) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w8, 2, pcsections !0
; CHECK-NEXT: $w9 = ORNWrs $wzr, killed renamable $w9, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRW killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p)
@@ -295,7 +295,7 @@ define i32 @fetch_and_or(ptr %p) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
+ ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p)
; CHECK-NEXT: $w10 = ORRWrs renamable $w8, renamable $w9, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRW killed renamable $w10, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
@@ -726,7 +726,7 @@ define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -750,7 +750,7 @@ define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -773,7 +773,7 @@ define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -797,7 +797,7 @@ define i8 @atomicrmw_and_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -821,7 +821,7 @@ define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -845,7 +845,7 @@ define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -869,7 +869,7 @@ define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0
@@ -895,7 +895,7 @@ define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0
@@ -923,10 +923,10 @@ define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -951,10 +951,10 @@ define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -977,7 +977,7 @@ define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1001,7 +1001,7 @@ define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1024,7 +1024,7 @@ define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1048,7 +1048,7 @@ define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1072,7 +1072,7 @@ define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1096,7 +1096,7 @@ define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0
; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
@@ -1120,7 +1120,7 @@ define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0
@@ -1146,7 +1146,7 @@ define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0
@@ -1174,10 +1174,10 @@ define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STLXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1202,10 +1202,10 @@ define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) {
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
; CHECK-NEXT: liveins: $w9, $x0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8
; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0
- ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0
+ ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0
; CHECK-NEXT: early-clobber renamable $w11 = STXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
; CHECK-NEXT: {{ $}}
@@ -1230,7 +1230,7 @@ define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 7, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 0, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
@@ -1272,7 +1272,7 @@ define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) {
; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
; CHECK-NEXT: liveins: $w1, $w2, $x8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+ ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr)
; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 15, pcsections !0
; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 8, implicit-def $nzcv, pcsections !0
; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll
index a8520af..08021cc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll
@@ -11,28 +11,28 @@ define [9 x i64] @callee_sret_demotion() {
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64))
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64))
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD1]](p0) :: (store (s64))
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD2]](p0) :: (store (s64))
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD3]](p0) :: (store (s64))
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD4]](p0) :: (store (s64))
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD5]](p0) :: (store (s64))
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD6]](p0) :: (store (s64))
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD7]](p0) :: (store (s64))
; CHECK-NEXT: RET_ReallyLR
ret [9 x i64] zeroinitializer
@@ -48,28 +48,28 @@ define i64 @caller() {
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD3]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD4]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD5]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64)
; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD7]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: $x0 = COPY [[LOAD4]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
@@ -88,28 +88,28 @@ define i64 @caller_tail() {
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD3]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD4]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD5]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64)
; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD7]](p0) :: (load (s64) from %stack.0)
; CHECK-NEXT: $x0 = COPY [[LOAD4]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
index 4aac649..39860a7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
@@ -4,7 +4,7 @@
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
index b10c887e..b3e436b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
@@ -61,7 +61,7 @@ define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) {
; CHECK-LABEL: name: test_split_struct
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index ca8f5de..36529be 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -67,10 +67,10 @@ define void @test_multiple_args(i64 %in) {
; CHECK: G_STORE [[DBL]](s64), [[ADDR]](p0) :: (store (s64) into %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[I64]](s64), [[GEP1]](p0) :: (store (s64) into %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store (s8) into %ir.addr + 16, align 8)
; CHECK: RET_ReallyLR
define void @test_struct_formal({double, i64, i8} %in, ptr %addr) {
@@ -84,10 +84,10 @@ define void @test_struct_formal({double, i64, i8} %in, ptr %addr) {
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load (s64) from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 16, align 8)
; CHECK: $d0 = COPY [[LD1]](s64)
@@ -103,13 +103,13 @@ define {double, i64, i32} @test_struct_return(ptr %addr) {
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
+; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load (s64) from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
+; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s64) = G_LOAD [[GEP2]](p0) :: (load (s64) from %ir.addr + 16)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP3]](p0) :: (load (s64) from %ir.addr + 24)
; CHECK: $x0 = COPY [[LD1]](s64)
@@ -286,7 +286,7 @@ define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) {
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
+; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
index f50540b..1c0fc3f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
@@ -38,44 +38,44 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
index b21046d..97a0417 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
@@ -111,24 +111,24 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -159,24 +159,24 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -235,44 +235,44 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -303,24 +303,24 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p2) :: (load (s128) from %ir.1 + 48, align 4, addrspace 2)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p1) :: (store (s128) into %ir.0 + 48, align 4, addrspace 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2)
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p1) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
index 57d031d..fc4fbac 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
@@ -89,17 +89,17 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD2]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -124,35 +124,35 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 48, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD3]](p0) :: (load (s128) from %ir.1 + 64, align 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 80, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD6]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD8]](p0) :: (store (s128) into %ir.0 + 64, align 4)
; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64)
; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -177,23 +177,23 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.1 + 48)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD4]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %ir.0 + 48)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -218,17 +218,17 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p2) = COPY $x1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD2]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p1) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index f8d2bf3..b06cadf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -100,7 +100,7 @@ body: |
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK-NEXT: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -127,13 +127,13 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 48, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -160,7 +160,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -190,13 +190,13 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 44, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -222,11 +222,11 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16448
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[C2]](s16), [[PTR_ADD1]](p0) :: (store (s16) into %ir.dst + 16, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -254,7 +254,7 @@ body: |
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK-NEXT: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
index 8d8f717..7393091 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
@@ -46,9 +46,9 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
index fa1700a..1a21064 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
@@ -32,11 +32,11 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16) from unknown-address + 8, align 8)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 10, align 2)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[DEF]](s32)
@@ -48,7 +48,7 @@ body: |
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY1]](p0) :: (load (s16) from unknown-address + 8, align 8)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 10, align 2)
; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD3]](s32), [[DEF]](s32)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[C3]](s64)
@@ -61,7 +61,7 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[AND1]](s64)
; CHECK-NEXT: G_STORE [[COPY2]](s64), %ptr(p0) :: (store (s64), align 16)
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 8, align 8)
; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD3]](p0) :: (store (s8) into unknown-address + 10, align 2)
%ptr:_(p0) = COPY $x0
@@ -96,16 +96,16 @@ body: |
; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]]
; CHECK-NEXT: G_STORE [[AND5]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND6]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND8]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND9]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s318) = G_IMPLICIT_DEF
@@ -140,16 +140,16 @@ body: |
; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]]
; CHECK-NEXT: G_STORE [[AND5]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND6]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND8]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND9]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s318) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
index b0736fb..2378401 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir
@@ -195,13 +195,13 @@ body: |
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C1]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[UV]](s64), [[COPY]](p0) :: (store (s32), align 16)
; CHECK-NEXT: G_STORE [[LSHR1]](s64), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4, align 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C1]](s64)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[UV1]](s64), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 6, align 2)
; CHECK-NEXT: G_STORE [[LSHR2]](s64), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 10)
; CHECK-NEXT: RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
index 96be30b..c301e76 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
@@ -97,16 +97,16 @@ body: |
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[C1]], [[C3]]
; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C6]](s64)
; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C7]](s64)
; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%cst:_(s318) = G_CONSTANT i318 1234
@@ -136,10 +136,10 @@ body: |
; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 16)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%cst:_(s158) = G_CONSTANT i158 1234
@@ -170,10 +170,10 @@ body: |
; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s16) = G_EXTRACT [[AND2]](s64), 0
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[EXTRACT]](s16), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 16, align 16)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%cst:_(s142) = G_CONSTANT i142 1234
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir
index b0b0e6b..dafc304 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir
@@ -328,7 +328,7 @@ body: |
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK-NEXT: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (<2 x s64>) into %stack.0, align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %stack.0 + 16, basealign 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idx, [[C1]]
@@ -426,7 +426,7 @@ body: |
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK-NEXT: G_STORE [[COPY]](<4 x s32>), [[FRAME_INDEX]](p0) :: (store (<4 x s32>) into %stack.0, align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into %stack.0 + 16, basealign 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idxprom, [[C1]]
@@ -460,7 +460,7 @@ body: |
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK-NEXT: G_STORE [[COPY]](<8 x s16>), [[FRAME_INDEX]](p0) :: (store (<8 x s16>) into %stack.0, align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into %stack.0 + 16, basealign 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idxprom, [[C1]]
@@ -495,7 +495,7 @@ body: |
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[DEF]](<2 x p0>)
; CHECK-NEXT: G_STORE [[BITCAST]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (<2 x s64>) into %stack.0, align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[DEF]](<2 x p0>)
; CHECK-NEXT: G_STORE [[BITCAST1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %stack.0 + 16, basealign 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
index 588dfd9..1c10e08 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
@@ -22,7 +22,7 @@ body: |
; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>)
; CHECK-NEXT: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%0:_(<4 x s32>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
index e1b6437..a19ab0b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
@@ -135,7 +135,7 @@ body: |
; CHECK-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC2]](<2 x s32>), [[FPTRUNC3]](<2 x s32>)
; CHECK-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY5]], [[C]](s64)
; CHECK-NEXT: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%2:_(<2 x s64>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index 11c6c7f..858a5a2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -258,10 +258,10 @@ body: |
; CHECK-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
; CHECK-NEXT: G_STORE [[UV10]](s32), [[COPY]](p0) :: (store (s32), align 16)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[UV11]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[UV12]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
; CHECK-NEXT: G_BR %bb.1
bb.1:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir
index 3a2c57a..29a3e38 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir
@@ -46,7 +46,7 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>))
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x8
@@ -72,7 +72,7 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>))
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x8
@@ -95,7 +95,7 @@ body: |
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64), align 16)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[C1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x8
@@ -140,7 +140,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: $q0 = COPY [[LOAD]](<2 x s64>)
; CHECK-NEXT: $q1 = COPY [[LOAD1]](<2 x s64>)
@@ -166,7 +166,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: $q0 = COPY [[LOAD]](<2 x s64>)
; CHECK-NEXT: $q1 = COPY [[LOAD1]](<2 x s64>)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
index 94bdcf7..2c326902 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -332,7 +332,7 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[DEF]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%val:_(<32 x s8>) = G_IMPLICIT_DEF
@@ -355,7 +355,7 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[DEF]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%val:_(<16 x s16>) = G_IMPLICIT_DEF
@@ -378,7 +378,7 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[DEF]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%val:_(<8 x s32>) = G_IMPLICIT_DEF
@@ -401,7 +401,7 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%val:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -423,10 +423,10 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load (<16 x s8>) from unknown-address + 16)
; CHECK-NEXT: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store (<16 x s8>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -448,10 +448,10 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<8 x s16>) from unknown-address + 16)
; CHECK-NEXT: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store (<8 x s16>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -473,10 +473,10 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; CHECK-NEXT: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -498,10 +498,10 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -549,10 +549,10 @@ body: |
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 32)
; CHECK-NEXT: RET_ReallyLR
%val:_(<6 x s64>) = G_IMPLICIT_DEF
@@ -575,7 +575,7 @@ body: |
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
; CHECK-NEXT: G_STORE [[UV]](s16), [[COPY]](p0) :: (store (s16), align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UV1]](s16), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
; CHECK-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
@@ -597,7 +597,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s16)
@@ -626,10 +626,10 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 8, align 8)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 10, align 2)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[DEF]](s32)
@@ -641,9 +641,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[OR1]](s64)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[OR2]](s64)
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 16)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 8, align 8)
; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD3]](p0) :: (store (s8) into unknown-address + 10, align 2)
; CHECK-NEXT: RET_ReallyLR
@@ -710,19 +710,19 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 64)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 32, align 32)
; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>)
; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST]](<2 x p0>)
; CHECK-NEXT: G_STORE [[BITCAST3]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; CHECK-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST1]](<2 x p0>)
; CHECK-NEXT: G_STORE [[BITCAST4]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into unknown-address + 16)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64)
; CHECK-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST2]](<2 x p0>)
; CHECK-NEXT: G_STORE [[BITCAST5]](<2 x s64>), [[PTR_ADD3]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
index fae979d..30afd7e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -61,7 +61,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
%vec:_(<32 x s8>) = G_IMPLICIT_DEF
%vec1:_(<32 x s8>) = G_IMPLICIT_DEF
@@ -130,7 +130,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
%vec:_(<16 x s16>) = G_IMPLICIT_DEF
%vec1:_(<16 x s16>) = G_IMPLICIT_DEF
@@ -199,7 +199,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
%vec:_(<8 x s32>) = G_IMPLICIT_DEF
%vec1:_(<8 x s32>) = G_IMPLICIT_DEF
@@ -262,7 +262,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
%vec:_(<4 x s64>) = G_IMPLICIT_DEF
%vec1:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -331,7 +331,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
%vec:_(<32 x s8>) = G_IMPLICIT_DEF
%vec1:_(<32 x s8>) = G_IMPLICIT_DEF
@@ -400,7 +400,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
%vec:_(<16 x s16>) = G_IMPLICIT_DEF
%vec1:_(<16 x s16>) = G_IMPLICIT_DEF
@@ -469,7 +469,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
%vec:_(<8 x s32>) = G_IMPLICIT_DEF
%vec1:_(<8 x s32>) = G_IMPLICIT_DEF
@@ -532,7 +532,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
%vec:_(<4 x s64>) = G_IMPLICIT_DEF
%vec1:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -623,7 +623,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
%vec:_(<32 x s8>) = G_IMPLICIT_DEF
%vec1:_(<32 x s8>) = G_IMPLICIT_DEF
@@ -670,7 +670,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
%vec:_(<16 x s16>) = G_IMPLICIT_DEF
%vec1:_(<16 x s16>) = G_IMPLICIT_DEF
@@ -739,7 +739,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[SMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
%vec:_(<8 x s32>) = G_IMPLICIT_DEF
%vec1:_(<8 x s32>) = G_IMPLICIT_DEF
@@ -802,7 +802,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
%vec:_(<4 x s64>) = G_IMPLICIT_DEF
%vec1:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -871,7 +871,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
%vec:_(<32 x s8>) = G_IMPLICIT_DEF
%vec1:_(<32 x s8>) = G_IMPLICIT_DEF
@@ -940,7 +940,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
%vec:_(<16 x s16>) = G_IMPLICIT_DEF
%vec1:_(<16 x s16>) = G_IMPLICIT_DEF
@@ -1009,7 +1009,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[UMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
%vec:_(<8 x s32>) = G_IMPLICIT_DEF
%vec1:_(<8 x s32>) = G_IMPLICIT_DEF
@@ -1072,7 +1072,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
%vec:_(<4 x s64>) = G_IMPLICIT_DEF
%vec1:_(<4 x s64>) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir
index 332f933..b6488e9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir
@@ -16,13 +16,13 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16), align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 2, align 2)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C2]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[OR]], [[C2]](s64)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[OR]](s32), [[COPY1]](p0) :: (store (s16), align 4)
; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 2, align 2)
; CHECK-NEXT: $w0 = COPY [[C]](s32)
@@ -54,13 +54,13 @@ body: |
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[C]], [[C1]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s32), align 8)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4, align 4)
; CHECK-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 6, align 2)
; CHECK-NEXT: RET_ReallyLR
@@ -91,16 +91,16 @@ body: |
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[C1]]
; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64))
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -130,10 +130,10 @@ body: |
; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64))
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 8)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
@@ -163,10 +163,10 @@ body: |
; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s16) = G_EXTRACT [[AND2]](s64), 0
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64))
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[EXTRACT]](s16), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 16, align 8)
; CHECK-NEXT: RET_ReallyLR
%ptr:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
index 7b3be34..9edc1cb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
@@ -84,16 +84,16 @@ body: |
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[C1]]
; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s318) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
index 7dbe3fe..47aa570 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
@@ -715,7 +715,7 @@ body: |
; CHECK-NEXT: %ptr2:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr1, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr1, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -728,7 +728,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr2, [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
@@ -903,7 +903,7 @@ body: |
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr1, [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr1, [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -918,7 +918,7 @@ body: |
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32)
; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr2, [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD3]](<2 x s64>)
; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
index af03a21..2e70252 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir
@@ -165,7 +165,7 @@ body: |
; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY3]](<2 x s64>), [[COPY]], shufflemask(1, 2)
; CHECK-NEXT: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store (<2 x s64>), align 32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY4]], [[C]](s64)
; CHECK-NEXT: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%3:_(<2 x s64>) = COPY $q0
@@ -208,7 +208,7 @@ body: |
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY1]](<4 x s32>), [[COPY]], shufflemask(2, 6, 5, 3)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY4]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK-NEXT: RET_ReallyLR
%3:_(<4 x s32>) = COPY $q0
@@ -271,10 +271,10 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[EVEC2]](s64), [[EVEC3]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR6]](<2 x s64>), [[COPY8]](p0) :: (store (<2 x s64>), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY8]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR7]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY8]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[SHUF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR
%3:_(s64) = COPY $d0
@@ -458,7 +458,7 @@ body: |
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>)
; CHECK-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY8]](p0) :: (store (<4 x s32>), align 32)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY8]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR4]](<2 x s32>), [[PTR_ADD]](p0) :: (store (<2 x s32>) into unknown-address + 16, align 16)
; CHECK-NEXT: RET_ReallyLR
%3:_(s32) = COPY $s0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
index e665637..4f93f69 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
@@ -24,20 +24,20 @@ body: |
; CHECK-LINUX-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK-LINUX-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64))
; CHECK-LINUX-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-LINUX-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-LINUX-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
; CHECK-LINUX-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-LINUX-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-LINUX-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from unknown-address + 16)
; CHECK-LINUX-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-LINUX-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-LINUX-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from unknown-address + 24)
; CHECK-LINUX-NEXT: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64))
- ; CHECK-LINUX-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-LINUX-NEXT: G_STORE [[LOAD1]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 8)
- ; CHECK-LINUX-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-LINUX-NEXT: G_STORE [[LOAD2]](s64), [[PTR_ADD4]](p0) :: (store (s64) into unknown-address + 16)
- ; CHECK-LINUX-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-LINUX-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-LINUX-NEXT: G_STORE [[LOAD3]](s64), [[PTR_ADD5]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-LINUX-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
index 9c528623..1e1ae01 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
@@ -46,16 +46,16 @@ body: |
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[XOR4]], [[C1]]
; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s318) = G_IMPLICIT_DEF
@@ -90,16 +90,16 @@ body: |
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[XOR4]], [[C1]]
; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64)
; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64)
; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s319) = G_IMPLICIT_DEF
@@ -133,10 +133,10 @@ body: |
; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0
; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64)
; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 16)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%a:_(s158) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
index 09e5a15..a422f60 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
@@ -667,11 +667,10 @@ body: |
; SELECT-NEXT: {{ $}}
; SELECT-NEXT: %zero:gpr64 = COPY $xzr
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
- ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
+ ; SELECT-NEXT: %cmp_lhs:gpr64 = SUBSXrs %zero, %reg0, 63, implicit-def dead $nzcv
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
- ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
- ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
+ ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_lhs, %sext_in_reg, 131, implicit-def $nzcv
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; SELECT-NEXT: $w0 = COPY %cmp
; SELECT-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll b/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
index 7706ca9..9fab3d1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-- -o - < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-- -o - < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-- -global-isel -o - < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; Verify that we can fold csneg/csel into csinc instruction.
@@ -8,12 +9,20 @@ target triple = "aarch64-unknown-linux-gnu"
; char csinc1 (char a, char b) { return !a ? b+1 : b+3; }
define i8 @csinc1(i8 %a, i8 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc1:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: tst w0, #0xff
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc1:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tst w0, #0xff
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc1:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: tst w0, #0xff
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i8 %a, 0
%cond.v = select i1 %tobool.not, i8 1, i8 3
@@ -23,12 +32,20 @@ entry:
; short csinc2 (short a, short b) { return !a ? b+1 : b+3; }
define i16 @csinc2(i16 %a, i16 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc2:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: tst w0, #0xffff
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc2:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tst w0, #0xffff
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc2:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: tst w0, #0xffff
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i16 %a, 0
%cond.v = select i1 %tobool.not, i16 1, i16 3
@@ -38,12 +55,20 @@ entry:
; int csinc3 (int a, int b) { return !a ? b+1 : b+3; }
define i32 @csinc3(i32 %a, i32 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc3:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc3:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc3:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i32 %a, 0
%cond.v = select i1 %tobool.not, i32 1, i32 3
@@ -53,12 +78,20 @@ entry:
; long long csinc4 (long long a, long long b) { return !a ? b+1 : b+3; }
define i64 @csinc4(i64 %a, i64 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc4:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: add x8, x1, #3
-; CHECK-NEXT: csinc x0, x8, x1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc4:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: add x8, x1, #3
+; CHECK-SD-NEXT: csinc x0, x8, x1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc4:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: csinc x8, x8, xzr, ne
+; CHECK-GI-NEXT: add x0, x8, x1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i64 %a, 0
%cond.v = select i1 %tobool.not, i64 1, i64 3
@@ -68,12 +101,21 @@ entry:
; long long csinc8 (long long a, long long b) { return a ? b-1 : b+1; }
define i64 @csinc8(i64 %a, i64 %b) {
-; CHECK-LABEL: csinc8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sub x8, x1, #1
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: csinc x0, x8, x1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub x8, x1, #1
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: csinc x0, x8, x1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: sbfx x8, x8, #0, #1
+; CHECK-GI-NEXT: orr x8, x8, #0x1
+; CHECK-GI-NEXT: add x0, x8, x1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i64 %a, 0
%cond.v = select i1 %tobool.not, i64 1, i64 -1
@@ -83,15 +125,26 @@ entry:
; long long csinc9 (long long a, long long b) { return a ? b+1 : b-1; }
define i64 @csinc9(i64 %a, i64 %b) {
-; CHECK-LABEL: csinc9:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sub x8, x1, #1
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: csinc x0, x8, x1, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc9:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sub x8, x1, #1
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: csinc x0, x8, x1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc9:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: sbfx x8, x8, #0, #1
+; CHECK-GI-NEXT: orr x8, x8, #0x1
+; CHECK-GI-NEXT: add x0, x8, x1
+; CHECK-GI-NEXT: ret
entry:
%tobool.not = icmp eq i64 %a, 0
%cond.v = select i1 %tobool.not, i64 -1, i64 1
%cond = add nsw i64 %cond.v, %b
ret i64 %cond
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
index f7e16b8..9947fba 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
@@ -38,14 +38,12 @@ define <16 x i32> @mul_i32(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v4.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v3.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v3.8h
-; CHECK-GI-NEXT: umull v2.4s, v4.4h, v5.4h
-; CHECK-GI-NEXT: umull2 v3.4s, v4.8h, v5.8h
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v3.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v0.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -75,26 +73,20 @@ define <16 x i64> @mul_i64(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v4.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v2.8h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v16.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v17.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v4.2s, v2.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v4.4s, v2.4s
-; CHECK-GI-NEXT: umull v2.2d, v5.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v5.4s, v3.4s
-; CHECK-GI-NEXT: umull v4.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umull2 v5.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umull v6.2d, v16.2s, v17.2s
-; CHECK-GI-NEXT: umull2 v7.2d, v16.4s, v17.4s
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v5.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v7.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
+; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0
+; CHECK-GI-NEXT: ushll2 v5.2d, v5.4s, #0
+; CHECK-GI-NEXT: ushll v6.2d, v7.2s, #0
+; CHECK-GI-NEXT: ushll2 v7.2d, v7.4s, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
@@ -142,18 +134,12 @@ define <16 x i32> @mla_i32(<16 x i8> %a, <16 x i8> %b, <16 x i32> %c) {
;
; CHECK-GI-LABEL: mla_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v6.4h, v7.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v6.8h, v7.8h
-; CHECK-GI-NEXT: umlal v4.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v5.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v6.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v7.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v6.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v6.8h
+; CHECK-GI-NEXT: uaddw v2.4s, v4.4s, v7.4h
+; CHECK-GI-NEXT: uaddw2 v3.4s, v5.4s, v7.8h
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -186,35 +172,21 @@ define <16 x i64> @mla_i64(<16 x i8> %a, <16 x i8> %b, <16 x i64> %c) {
;
; CHECK-GI-LABEL: mla_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov v16.16b, v2.16b
-; CHECK-GI-NEXT: mov v17.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
-; CHECK-GI-NEXT: mov v4.16b, v6.16b
-; CHECK-GI-NEXT: mov v5.16b, v7.16b
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v18.4s, v6.4h, #0
-; CHECK-GI-NEXT: ushll v20.4s, v7.4h, #0
-; CHECK-GI-NEXT: ushll2 v19.4s, v6.8h, #0
-; CHECK-GI-NEXT: ushll v21.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v22.4s, v7.8h, #0
-; CHECK-GI-NEXT: ushll v23.4s, v1.4h, #0
-; CHECK-GI-NEXT: ldp q6, q7, [sp]
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v16.2d, v18.2s, v20.2s
-; CHECK-GI-NEXT: umlal2 v17.2d, v18.4s, v20.4s
-; CHECK-GI-NEXT: umlal v2.2d, v19.2s, v22.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v19.4s, v22.4s
-; CHECK-GI-NEXT: umlal v4.2d, v21.2s, v23.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v21.4s, v23.4s
-; CHECK-GI-NEXT: umlal v6.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v7.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v16.16b
-; CHECK-GI-NEXT: mov v1.16b, v17.16b
+; CHECK-GI-NEXT: umull v16.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ldp q19, q20, [sp]
+; CHECK-GI-NEXT: ushll v1.4s, v16.4h, #0
+; CHECK-GI-NEXT: ushll2 v16.4s, v16.8h, #0
+; CHECK-GI-NEXT: ushll v17.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v18.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v16.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v16.4s
+; CHECK-GI-NEXT: uaddw v4.2d, v6.2d, v17.2s
+; CHECK-GI-NEXT: uaddw2 v5.2d, v7.2d, v17.4s
+; CHECK-GI-NEXT: uaddw v6.2d, v19.2d, v18.2s
+; CHECK-GI-NEXT: uaddw2 v7.2d, v20.2d, v18.4s
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll
index 432ffc3..7524782 100644
--- a/llvm/test/CodeGen/AArch64/abds-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abds-neg.ll
@@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w9, w1, w8
-; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w8
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i32 %b to i64
@@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w1
-; CHECK-NEXT: sub w9, w8, w0
-; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w8, w8, w9, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i16 %b to i64
@@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, gt
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, gt
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, gt
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -359,9 +347,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w8, w9, ge
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ge
; CHECK-NEXT: ret
%cmp = icmp sge i32 %a, %b
%ab = sub i32 %a, %b
@@ -373,9 +360,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, lt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ge
; CHECK-NEXT: ret
%cmp = icmp slt i64 %a, %b
%ab = sub i64 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index ed1e607..bbdb116 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w9, w1, w8
; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w0, w8, w9, gt
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i32 %b to i64
@@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w1
-; CHECK-NEXT: sub w9, w8, w0
; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w0, w8, w9, gt
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i16 %b to i64
@@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
@@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
@@ -248,9 +242,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_minmax_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
%max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
@@ -261,9 +254,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_minmax_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
%max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
@@ -324,9 +316,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%cmp = icmp slt i32 %a, %b
%ab = sub i32 %a, %b
@@ -338,9 +329,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = sub i64 %a, %b
@@ -572,9 +562,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_select_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, gt
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
%cmp = icmp sgt i32 %a, %b
%ab = select i1 %cmp, i32 %a, i32 %b
@@ -586,9 +575,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_select_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, gt
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, le
; CHECK-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = select i1 %cmp, i64 %a, i64 %b
diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll
index 8fb106e..d07f099a 100644
--- a/llvm/test/CodeGen/AArch64/abdu-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll
@@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w9, w1, w8
-; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w8, w8, w9, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w8
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
%bext = zext i32 %b to i64
@@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w1, #0xffff
-; CHECK-NEXT: sub w9, w8, w0
-; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w8, w8, w9, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i16 %b to i64
@@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w8, w9, w8, hi
-; CHECK-NEXT: neg w0, w8
+; CHECK-NEXT: subs w8, w1, w0
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, hi
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x8, x9, x8, hi
-; CHECK-NEXT: neg x0, x8
+; CHECK-NEXT: subs x8, x1, x0
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -363,9 +351,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w8, w9, hs
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
%cmp = icmp uge i32 %a, %b
%ab = sub i32 %a, %b
@@ -377,9 +364,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, lo
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, hs
; CHECK-NEXT: ret
%cmp = icmp ult i64 %a, %b
%ab = sub i64 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 4585de9..1045ee2 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w9, w1, w8
; CHECK-NEXT: subs w8, w8, w1
-; CHECK-NEXT: csel w0, w8, w9, hi
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
%bext = zext i32 %b to i64
@@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w1, #0xffff
-; CHECK-NEXT: sub w9, w8, w0
; CHECK-NEXT: subs w8, w0, w8
-; CHECK-NEXT: csel w0, w8, w9, hi
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i16 %b to i64
@@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_ext_i32_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
@@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_ext_i64_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%aext = zext i64 %a to i128
%bext = zext i64 %b to i128
@@ -252,9 +246,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_minmax_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
%max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
@@ -265,9 +258,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_minmax_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
%max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
@@ -330,9 +322,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_cmp_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%cmp = icmp ult i32 %a, %b
%ab = sub i32 %a, %b
@@ -344,9 +335,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_cmp_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%cmp = icmp uge i64 %a, %b
%ab = sub i64 %a, %b
@@ -437,9 +427,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: abd_select_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: subs w9, w0, w1
-; CHECK-NEXT: csel w0, w9, w8, hi
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: cneg w0, w8, ls
; CHECK-NEXT: ret
%cmp = icmp ugt i32 %a, %b
%ab = select i1 %cmp, i32 %a, i32 %b
@@ -451,9 +440,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: abd_select_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x1, x0
-; CHECK-NEXT: subs x9, x0, x1
-; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: cneg x0, x8, ls
; CHECK-NEXT: ret
%cmp = icmp uge i64 %a, %b
%ab = select i1 %cmp, i64 %a, i64 %b
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index 4b1393f..12e8bf2 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefixes=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefixes=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck --check-prefixes=CHECK-GI %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-LE-LABEL: test_simple:
@@ -18,11 +19,16 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-BE-NEXT: subs x1, x8, x5
; CHECK-BE-NEXT: sbc x0, x9, x4
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_simple:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x8, x0, x2
+; CHECK-GI-NEXT: adc x9, x1, x3
+; CHECK-GI-NEXT: subs x0, x8, x4
+; CHECK-GI-NEXT: sbc x1, x9, x5
+; CHECK-GI-NEXT: ret
%valadd = add i128 %a, %b
-
%valsub = sub i128 %valadd, %c
-
ret i128 %valsub
}
@@ -38,9 +44,13 @@ define i128 @test_imm(i128 %a) {
; CHECK-BE-NEXT: adds x1, x1, #12
; CHECK-BE-NEXT: cinc x0, x0, hs
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_imm:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x0, x0, #12
+; CHECK-GI-NEXT: adc x1, x1, xzr
+; CHECK-GI-NEXT: ret
%val = add i128 %a, 12
-
ret i128 %val
}
@@ -58,11 +68,16 @@ define i128 @test_shifted(i128 %a, i128 %b) {
; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_shifted:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: lsr x8, x2, #19
+; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45
+; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%rhs = shl i128 %b, 45
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
@@ -86,11 +101,19 @@ define i128 @test_extended(i128 %a, i16 %b) {
; CHECK-BE-NEXT: extr x8, x9, x8, #61
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_extended:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: ; kill: def $w2 killed $w2 def $x2
+; CHECK-GI-NEXT: sxth x8, w2
+; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3
+; CHECK-GI-NEXT: asr x9, x8, #63
+; CHECK-GI-NEXT: lsr x8, x8, #61
+; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%ext = sext i16 %b to i128
%rhs = shl i128 %ext, 3
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
diff --git a/llvm/test/CodeGen/AArch64/add-extract.ll b/llvm/test/CodeGen/AArch64/add-extract.ll
index 67c9f74..923bf08 100644
--- a/llvm/test/CodeGen/AArch64/add-extract.ll
+++ b/llvm/test/CodeGen/AArch64/add-extract.ll
@@ -1,13 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i64 @add_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
-; CHECK-LABEL: add_i64_ext_load:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: add d0, d0, d1
-; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_i64_ext_load:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d1, [x0]
+; CHECK-SD-NEXT: add d0, d0, d1
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_i64_ext_load:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x9, d0
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: add x0, x9, x8
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = load i64, ptr %B
%c = add i64 %a, %b
@@ -15,12 +23,19 @@ define i64 @add_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
}
define i64 @sub_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
-; CHECK-LABEL: sub_i64_ext_load:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: sub d0, d0, d1
-; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sub_i64_ext_load:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d1, [x0]
+; CHECK-SD-NEXT: sub d0, d0, d1
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sub_i64_ext_load:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x9, d0
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: sub x0, x9, x8
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = load i64, ptr %B
%c = sub i64 %a, %b
@@ -28,12 +43,20 @@ define i64 @sub_i64_ext_load(<1 x i64> %A, ptr %B) nounwind {
}
define void @add_i64_ext_load_store(<1 x i64> %A, ptr %B) nounwind {
-; CHECK-LABEL: add_i64_ext_load_store:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d1, [x0]
-; CHECK-NEXT: add d0, d0, d1
-; CHECK-NEXT: str d0, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_i64_ext_load_store:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d1, [x0]
+; CHECK-SD-NEXT: add d0, d0, d1
+; CHECK-SD-NEXT: str d0, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_i64_ext_load_store:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x9, d0
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: add x8, x9, x8
+; CHECK-GI-NEXT: str x8, [x0]
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = load i64, ptr %B
%c = add i64 %a, %b
@@ -55,11 +78,18 @@ define i64 @add_v2i64_ext_load(<2 x i64> %A, ptr %B) nounwind {
}
define i64 @add_i64_ext_ext(<1 x i64> %A, <1 x i64> %B) nounwind {
-; CHECK-LABEL: add_i64_ext_ext:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add d0, d0, d1
-; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_i64_ext_ext:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add d0, d0, d1
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_i64_ext_ext:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x8, d0
+; CHECK-GI-NEXT: fmov x9, d1
+; CHECK-GI-NEXT: add x0, x8, x9
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = extractelement <1 x i64> %B, i32 0
%c = add i64 %a, %b
@@ -67,13 +97,20 @@ define i64 @add_i64_ext_ext(<1 x i64> %A, <1 x i64> %B) nounwind {
}
define i32 @add_i32_ext_load(<1 x i32> %A, ptr %B) nounwind {
-; CHECK-LABEL: add_i32_ext_load:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: ldr w8, [x0]
-; CHECK-NEXT: add w0, w9, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_i32_ext_load:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: ldr w8, [x0]
+; CHECK-SD-NEXT: add w0, w9, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_i32_ext_load:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: ldr w8, [x0]
+; CHECK-GI-NEXT: add w0, w9, w8
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i32> %A, i32 0
%b = load i32, ptr %B
%c = add i32 %a, %b
@@ -81,13 +118,22 @@ define i32 @add_i32_ext_load(<1 x i32> %A, ptr %B) nounwind {
}
define i64 @add_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind {
-; CHECK-LABEL: add_i64_ext_ext_test1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT: add d0, d0, d1
-; CHECK-NEXT: add d0, d0, d2
-; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_i64_ext_ext_test1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-SD-NEXT: add d0, d0, d1
+; CHECK-SD-NEXT: add d0, d0, d2
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_i64_ext_ext_test1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, v1.d[1]
+; CHECK-GI-NEXT: fmov x9, d0
+; CHECK-GI-NEXT: fmov x10, d1
+; CHECK-GI-NEXT: add x9, x9, x10
+; CHECK-GI-NEXT: add x0, x9, x8
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = extractelement <2 x i64> %B, i32 0
%c = extractelement <2 x i64> %B, i32 1
@@ -97,13 +143,22 @@ define i64 @add_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind {
}
define i64 @sub_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind {
-; CHECK-LABEL: sub_i64_ext_ext_test1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT: sub d0, d0, d1
-; CHECK-NEXT: sub d0, d0, d2
-; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sub_i64_ext_ext_test1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-SD-NEXT: sub d0, d0, d1
+; CHECK-SD-NEXT: sub d0, d0, d2
+; CHECK-SD-NEXT: fmov x0, d0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sub_i64_ext_ext_test1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, v1.d[1]
+; CHECK-GI-NEXT: fmov x9, d0
+; CHECK-GI-NEXT: fmov x10, d1
+; CHECK-GI-NEXT: sub x9, x9, x10
+; CHECK-GI-NEXT: sub x0, x9, x8
+; CHECK-GI-NEXT: ret
%a = extractelement <1 x i64> %A, i32 0
%b = extractelement <2 x i64> %B, i32 0
%c = extractelement <2 x i64> %B, i32 1
diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index be75ab1..b4556c7 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -1,16 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
target triple = "arm64-apple-ios7.0"
define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
-; CHECK-LABEL: foo:
-; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: lsr x8, x1, #32
-; CHECK-NEXT: ldr w9, [x0, #4]
-; CHECK-NEXT: cmn x3, x2
-; CHECK-NEXT: umull x8, w9, w8
-; CHECK-NEXT: cinc x0, x8, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: lsr x8, x1, #32
+; CHECK-SD-NEXT: ldr w9, [x0, #4]
+; CHECK-SD-NEXT: cmn x3, x2
+; CHECK-SD-NEXT: umull x8, w9, w8
+; CHECK-SD-NEXT: cinc x0, x8, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: lsr x9, x1, #32
+; CHECK-GI-NEXT: cmn x3, x2
+; CHECK-GI-NEXT: cset w10, hs
+; CHECK-GI-NEXT: lsr x8, x8, #32
+; CHECK-GI-NEXT: and x10, x10, #0x1
+; CHECK-GI-NEXT: umaddl x0, w8, w9, x10
+; CHECK-GI-NEXT: ret
entry:
%0 = lshr i64 %a, 32
%1 = load i64, ptr %ptr, align 8
@@ -24,3 +37,6 @@ entry:
}
attributes #0 = { norecurse nounwind readonly }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index 3a4955c..bb0d38a 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -1,50 +1,26 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-linux-gnu -verify-machineinstrs | FileCheck %s
-
-; Note that this should be refactored (for efficiency if nothing else)
-; when the PCS is implemented so we don't have to worry about the
-; loads and stores.
-
-@var_i32 = global i32 42
-@var2_i32 = global i32 43
-@var_i64 = global i64 0
+; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; Add pure 12-bit immediates:
-define void @add_small() {
-; CHECK-LABEL: add_small:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var_i32
-; CHECK-NEXT: adrp x9, :got:var_i64
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32]
-; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64]
-; CHECK-NEXT: ldr w10, [x8]
-; CHECK-NEXT: ldr x11, [x9]
-; CHECK-NEXT: add w10, w10, #4095
-; CHECK-NEXT: add x11, x11, #52
-; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: str x11, [x9]
-; CHECK-NEXT: ret
-
- %val32 = load i32, ptr @var_i32
+define i32 @add_small_i32(i32 %val32) {
+; CHECK-LABEL: add_small_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add w0, w0, #4095
+; CHECK-NEXT: ret
%newval32 = add i32 %val32, 4095
- store i32 %newval32, ptr @var_i32
+ ret i32 %newval32
+}
- %val64 = load i64, ptr @var_i64
+define i64 @add_small_i64(i64 %val64) {
+; CHECK-LABEL: add_small_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x0, x0, #52
+; CHECK-NEXT: ret
%newval64 = add i64 %val64, 52
- store i64 %newval64, ptr @var_i64
-
- ret void
+ ret i64 %newval64
}
-; Make sure we grab the imm variant when the register operand
-; can be implicitly zero-extend.
-; We used to generate something horrible like this:
-; wA = ldrb
-; xB = ldimm 12
-; xC = add xB, wA, uxtb
-; whereas this can be achieved with:
-; wA = ldrb
-; xC = add xA, #12 ; <- xA implicitly zero extend wA.
define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) {
; CHECK-LABEL: add_small_imm:
; CHECK: // %bb.0: // %entry
@@ -55,98 +31,71 @@ define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) {
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
entry:
-
%t = load i8, ptr %p
%promoted = zext i8 %t to i64
%zextt = zext i8 %t to i32
%add = add nuw i32 %zextt, %b
-
%add2 = add nuw i64 %promoted, 12
store i32 %add, ptr %addr
-
store i64 %add2, ptr %q
ret void
}
; Add 12-bit immediates, shifted left by 12 bits
-define void @add_med() {
-; CHECK-LABEL: add_med:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var_i32
-; CHECK-NEXT: adrp x9, :got:var_i64
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32]
-; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64]
-; CHECK-NEXT: ldr w10, [x8]
-; CHECK-NEXT: ldr x11, [x9]
-; CHECK-NEXT: add w10, w10, #3567, lsl #12 // =14610432
-; CHECK-NEXT: add x11, x11, #4095, lsl #12 // =16773120
-; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: str x11, [x9]
-; CHECK-NEXT: ret
-
- %val32 = load i32, ptr @var_i32
+define i32 @add_med_i32(i32 %val32) {
+; CHECK-LABEL: add_med_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add w0, w0, #3567, lsl #12 // =14610432
+; CHECK-NEXT: ret
%newval32 = add i32 %val32, 14610432 ; =0xdef000
- store i32 %newval32, ptr @var_i32
+ ret i32 %newval32
+}
- %val64 = load i64, ptr @var_i64
+define i64 @add_med_i64(i64 %val64) {
+; CHECK-LABEL: add_med_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x0, x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: ret
%newval64 = add i64 %val64, 16773120 ; =0xfff000
- store i64 %newval64, ptr @var_i64
-
- ret void
+ ret i64 %newval64
}
; Subtract 12-bit immediates
-define void @sub_small() {
-; CHECK-LABEL: sub_small:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var_i32
-; CHECK-NEXT: adrp x9, :got:var_i64
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32]
-; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64]
-; CHECK-NEXT: ldr w10, [x8]
-; CHECK-NEXT: ldr x11, [x9]
-; CHECK-NEXT: sub w10, w10, #4095
-; CHECK-NEXT: sub x11, x11, #52
-; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: str x11, [x9]
-; CHECK-NEXT: ret
-
- %val32 = load i32, ptr @var_i32
+define i32 @sub_small_i32(i32 %val32) {
+; CHECK-LABEL: sub_small_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w0, w0, #4095
+; CHECK-NEXT: ret
%newval32 = sub i32 %val32, 4095
- store i32 %newval32, ptr @var_i32
+ ret i32 %newval32
+}
- %val64 = load i64, ptr @var_i64
+define i64 @sub_small_i64(i64 %val64) {
+; CHECK-LABEL: sub_small_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub x0, x0, #52
+; CHECK-NEXT: ret
%newval64 = sub i64 %val64, 52
- store i64 %newval64, ptr @var_i64
-
- ret void
+ ret i64 %newval64
}
; Subtract 12-bit immediates, shifted left by 12 bits
-define void @sub_med() {
-; CHECK-LABEL: sub_med:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var_i32
-; CHECK-NEXT: adrp x9, :got:var_i64
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32]
-; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64]
-; CHECK-NEXT: ldr w10, [x8]
-; CHECK-NEXT: ldr x11, [x9]
-; CHECK-NEXT: sub w10, w10, #3567, lsl #12 // =14610432
-; CHECK-NEXT: sub x11, x11, #4095, lsl #12 // =16773120
-; CHECK-NEXT: str w10, [x8]
-; CHECK-NEXT: str x11, [x9]
-; CHECK-NEXT: ret
-
- %val32 = load i32, ptr @var_i32
+define i32 @sub_med_i32(i32 %val32) {
+; CHECK-LABEL: sub_med_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w0, w0, #3567, lsl #12 // =14610432
+; CHECK-NEXT: ret
%newval32 = sub i32 %val32, 14610432 ; =0xdef000
- store i32 %newval32, ptr @var_i32
+ ret i32 %newval32
+}
- %val64 = load i64, ptr @var_i64
+define i64 @sub_med_i64(i64 %val64) {
+; CHECK-LABEL: sub_med_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub x0, x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: ret
%newval64 = sub i64 %val64, 16773120 ; =0xfff000
- store i64 %newval64, ptr @var_i64
-
- ret void
+ ret i64 %newval64
}
define i64 @add_two_parts_imm_i64(i64 %a) {
@@ -261,10 +210,10 @@ define void @add_in_loop(i32 %0) {
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov w19, #43690 // =0xaaaa
; CHECK-NEXT: movk w19, #170, lsl #16
-; CHECK-NEXT: .LBB15_1: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: .LBB19_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: add w0, w0, w19
; CHECK-NEXT: bl foox
-; CHECK-NEXT: b .LBB15_1
+; CHECK-NEXT: b .LBB19_1
br label %2
2:
%3 = phi i32 [ %0, %1 ], [ %5, %2 ]
@@ -273,75 +222,103 @@ define void @add_in_loop(i32 %0) {
br label %2
}
-define void @testing() {
-; CHECK-LABEL: testing:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var_i32
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32]
-; CHECK-NEXT: ldr w9, [x8]
-; CHECK-NEXT: cmp w9, #4095
-; CHECK-NEXT: b.ne .LBB16_6
-; CHECK-NEXT: // %bb.1: // %test2
-; CHECK-NEXT: adrp x10, :got:var2_i32
-; CHECK-NEXT: add w11, w9, #1
-; CHECK-NEXT: ldr x10, [x10, :got_lo12:var2_i32]
-; CHECK-NEXT: str w11, [x8]
-; CHECK-NEXT: ldr w10, [x10]
-; CHECK-NEXT: cmp w10, #3567, lsl #12 // =14610432
-; CHECK-NEXT: b.lo .LBB16_6
-; CHECK-NEXT: // %bb.2: // %test3
-; CHECK-NEXT: add w11, w9, #2
-; CHECK-NEXT: cmp w9, #123
-; CHECK-NEXT: str w11, [x8]
-; CHECK-NEXT: b.lt .LBB16_6
-; CHECK-NEXT: // %bb.3: // %test4
-; CHECK-NEXT: add w11, w9, #3
-; CHECK-NEXT: cmp w10, #321
-; CHECK-NEXT: str w11, [x8]
-; CHECK-NEXT: b.gt .LBB16_6
-; CHECK-NEXT: // %bb.4: // %test5
-; CHECK-NEXT: add w11, w9, #4
-; CHECK-NEXT: cmn w10, #443
-; CHECK-NEXT: str w11, [x8]
-; CHECK-NEXT: b.ge .LBB16_6
-; CHECK-NEXT: // %bb.5: // %test6
-; CHECK-NEXT: add w9, w9, #5
-; CHECK-NEXT: str w9, [x8]
-; CHECK-NEXT: .LBB16_6: // %common.ret
-; CHECK-NEXT: ret
- %val = load i32, ptr @var_i32
- %val2 = load i32, ptr @var2_i32
+define void @testing(ptr %var_i32, ptr %var2_i32) {
+; CHECK-SD-LABEL: testing:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr w8, [x0]
+; CHECK-SD-NEXT: cmp w8, #4095
+; CHECK-SD-NEXT: b.ne .LBB20_6
+; CHECK-SD-NEXT: // %bb.1: // %test2
+; CHECK-SD-NEXT: ldr w9, [x1]
+; CHECK-SD-NEXT: add w10, w8, #1
+; CHECK-SD-NEXT: str w10, [x0]
+; CHECK-SD-NEXT: cmp w9, #3567, lsl #12 // =14610432
+; CHECK-SD-NEXT: b.lo .LBB20_6
+; CHECK-SD-NEXT: // %bb.2: // %test3
+; CHECK-SD-NEXT: add w10, w8, #2
+; CHECK-SD-NEXT: cmp w8, #123
+; CHECK-SD-NEXT: str w10, [x0]
+; CHECK-SD-NEXT: b.lt .LBB20_6
+; CHECK-SD-NEXT: // %bb.3: // %test4
+; CHECK-SD-NEXT: add w10, w8, #3
+; CHECK-SD-NEXT: cmp w9, #321
+; CHECK-SD-NEXT: str w10, [x0]
+; CHECK-SD-NEXT: b.gt .LBB20_6
+; CHECK-SD-NEXT: // %bb.4: // %test5
+; CHECK-SD-NEXT: add w10, w8, #4
+; CHECK-SD-NEXT: cmn w9, #443
+; CHECK-SD-NEXT: str w10, [x0]
+; CHECK-SD-NEXT: b.ge .LBB20_6
+; CHECK-SD-NEXT: // %bb.5: // %test6
+; CHECK-SD-NEXT: add w8, w8, #5
+; CHECK-SD-NEXT: str w8, [x0]
+; CHECK-SD-NEXT: .LBB20_6: // %common.ret
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: testing:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr w8, [x0]
+; CHECK-GI-NEXT: cmp w8, #4095
+; CHECK-GI-NEXT: b.ne .LBB20_6
+; CHECK-GI-NEXT: // %bb.1: // %test2
+; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: add w10, w8, #1
+; CHECK-GI-NEXT: str w10, [x0]
+; CHECK-GI-NEXT: cmp w9, #3567, lsl #12 // =14610432
+; CHECK-GI-NEXT: b.lo .LBB20_6
+; CHECK-GI-NEXT: // %bb.2: // %test3
+; CHECK-GI-NEXT: add w10, w8, #2
+; CHECK-GI-NEXT: cmp w8, #123
+; CHECK-GI-NEXT: str w10, [x0]
+; CHECK-GI-NEXT: b.lt .LBB20_6
+; CHECK-GI-NEXT: // %bb.3: // %test4
+; CHECK-GI-NEXT: add w10, w8, #3
+; CHECK-GI-NEXT: cmp w9, #321
+; CHECK-GI-NEXT: str w10, [x0]
+; CHECK-GI-NEXT: b.gt .LBB20_6
+; CHECK-GI-NEXT: // %bb.4: // %test5
+; CHECK-GI-NEXT: add w10, w8, #4
+; CHECK-GI-NEXT: cmn w9, #444
+; CHECK-GI-NEXT: str w10, [x0]
+; CHECK-GI-NEXT: b.gt .LBB20_6
+; CHECK-GI-NEXT: // %bb.5: // %test6
+; CHECK-GI-NEXT: add w8, w8, #5
+; CHECK-GI-NEXT: str w8, [x0]
+; CHECK-GI-NEXT: .LBB20_6: // %common.ret
+; CHECK-GI-NEXT: ret
+ %val = load i32, ptr %var_i32
+ %val2 = load i32, ptr %var2_i32
%cmp_pos_small = icmp ne i32 %val, 4095
br i1 %cmp_pos_small, label %ret, label %test2
test2:
%newval2 = add i32 %val, 1
- store i32 %newval2, ptr @var_i32
+ store i32 %newval2, ptr %var_i32
%cmp_pos_big = icmp ult i32 %val2, 14610432
br i1 %cmp_pos_big, label %ret, label %test3
test3:
%newval3 = add i32 %val, 2
- store i32 %newval3, ptr @var_i32
+ store i32 %newval3, ptr %var_i32
%cmp_pos_slt = icmp slt i32 %val, 123
br i1 %cmp_pos_slt, label %ret, label %test4
test4:
%newval4 = add i32 %val, 3
- store i32 %newval4, ptr @var_i32
+ store i32 %newval4, ptr %var_i32
%cmp_pos_sgt = icmp sgt i32 %val2, 321
br i1 %cmp_pos_sgt, label %ret, label %test5
test5:
%newval5 = add i32 %val, 4
- store i32 %newval5, ptr @var_i32
+ store i32 %newval5, ptr %var_i32
%cmp_neg_uge = icmp sgt i32 %val2, -444
br i1 %cmp_neg_uge, label %ret, label %test6
test6:
%newval6 = add i32 %val, 5
- store i32 %newval6, ptr @var_i32
+ store i32 %newval6, ptr %var_i32
ret void
ret:
@@ -371,15 +348,26 @@ define i1 @sadd_add(i32 %a, i32 %b, ptr %p) {
declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
define i1 @uadd_add(i8 %a, i8 %b, ptr %p) {
-; CHECK-LABEL: uadd_add:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #255 // =0xff
-; CHECK-NEXT: bic w8, w8, w0
-; CHECK-NEXT: add w8, w8, w1, uxtb
-; CHECK-NEXT: lsr w0, w8, #8
-; CHECK-NEXT: add w8, w8, #1
-; CHECK-NEXT: strb w8, [x2]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: uadd_add:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #255 // =0xff
+; CHECK-SD-NEXT: bic w8, w8, w0
+; CHECK-SD-NEXT: add w8, w8, w1, uxtb
+; CHECK-SD-NEXT: lsr w0, w8, #8
+; CHECK-SD-NEXT: add w8, w8, #1
+; CHECK-SD-NEXT: strb w8, [x2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: uadd_add:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mvn w8, w0
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: add w8, w9, w8, uxtb
+; CHECK-GI-NEXT: cmp w8, w8, uxtb
+; CHECK-GI-NEXT: add w8, w8, #1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: strb w8, [x2]
+; CHECK-GI-NEXT: ret
%nota = xor i8 %a, -1
%a0 = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %nota, i8 %b)
%e0 = extractvalue {i8, i1} %a0, 0
@@ -521,29 +509,48 @@ define i1 @reject_non_eqne_csinc(i32 %0) {
}
define i32 @accept_csel(i32 %0) {
-; CHECK-LABEL: accept_csel:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w9, w0, #273, lsl #12 // =1118208
-; CHECK-NEXT: mov w8, #17 // =0x11
-; CHECK-NEXT: cmp w9, #273
-; CHECK-NEXT: mov w9, #11 // =0xb
-; CHECK-NEXT: csel w0, w9, w8, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: accept_csel:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w9, w0, #273, lsl #12 // =1118208
+; CHECK-SD-NEXT: mov w8, #17 // =0x11
+; CHECK-SD-NEXT: cmp w9, #273
+; CHECK-SD-NEXT: mov w9, #11 // =0xb
+; CHECK-SD-NEXT: csel w0, w9, w8, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: accept_csel:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub w8, w0, #273, lsl #12 // =1118208
+; CHECK-GI-NEXT: mov w9, #17 // =0x11
+; CHECK-GI-NEXT: mov w10, #11 // =0xb
+; CHECK-GI-NEXT: cmp w8, #273
+; CHECK-GI-NEXT: csel w0, w10, w9, eq
+; CHECK-GI-NEXT: ret
%2 = icmp eq i32 %0, 1118481
%3 = select i1 %2, i32 11, i32 17
ret i32 %3
}
define i32 @reject_non_eqne_csel(i32 %0) {
-; CHECK-LABEL: reject_non_eqne_csel:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #4369 // =0x1111
-; CHECK-NEXT: mov w9, #11 // =0xb
-; CHECK-NEXT: movk w8, #17, lsl #16
-; CHECK-NEXT: cmp w0, w8
-; CHECK-NEXT: mov w8, #17 // =0x11
-; CHECK-NEXT: csel w0, w9, w8, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: reject_non_eqne_csel:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #4369 // =0x1111
+; CHECK-SD-NEXT: mov w9, #11 // =0xb
+; CHECK-SD-NEXT: movk w8, #17, lsl #16
+; CHECK-SD-NEXT: cmp w0, w8
+; CHECK-SD-NEXT: mov w8, #17 // =0x11
+; CHECK-SD-NEXT: csel w0, w9, w8, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: reject_non_eqne_csel:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4369 // =0x1111
+; CHECK-GI-NEXT: mov w9, #17 // =0x11
+; CHECK-GI-NEXT: mov w10, #11 // =0xb
+; CHECK-GI-NEXT: movk w8, #17, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: csel w0, w10, w9, lo
+; CHECK-GI-NEXT: ret
%2 = icmp ult i32 %0, 1118481
%3 = select i1 %2, i32 11, i32 17
ret i32 %3
@@ -556,10 +563,10 @@ define void @accept_branch(i32 %0) {
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w0, #291, lsl #12 // =1191936
; CHECK-NEXT: cmp w8, #1110
-; CHECK-NEXT: b.eq .LBB32_2
+; CHECK-NEXT: b.eq .LBB36_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: .LBB36_2:
; CHECK-NEXT: b fooy
%2 = icmp ne i32 %0, 1193046
br i1 %2, label %4, label %3
@@ -576,10 +583,10 @@ define void @reject_non_eqne_branch(i32 %0) {
; CHECK-NEXT: mov w8, #13398 // =0x3456
; CHECK-NEXT: movk w8, #18, lsl #16
; CHECK-NEXT: cmp w0, w8
-; CHECK-NEXT: b.le .LBB33_2
+; CHECK-NEXT: b.le .LBB37_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: b fooy
%2 = icmp sgt i32 %0, 1193046
br i1 %2, label %4, label %3
@@ -591,25 +598,45 @@ define void @reject_non_eqne_branch(i32 %0) {
}
define i32 @reject_multiple_usages(i32 %0) {
-; CHECK-LABEL: reject_multiple_usages:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #4369 // =0x1111
-; CHECK-NEXT: mov w9, #3 // =0x3
-; CHECK-NEXT: mov w10, #17 // =0x11
-; CHECK-NEXT: movk w8, #17, lsl #16
-; CHECK-NEXT: mov w11, #12 // =0xc
-; CHECK-NEXT: cmp w0, w8
-; CHECK-NEXT: mov w8, #9 // =0x9
-; CHECK-NEXT: csel w8, w8, w9, eq
-; CHECK-NEXT: csel w9, w11, w10, hi
-; CHECK-NEXT: mov w10, #53312 // =0xd040
-; CHECK-NEXT: movk w10, #2, lsl #16
-; CHECK-NEXT: add w8, w8, w9
-; CHECK-NEXT: mov w9, #26304 // =0x66c0
-; CHECK-NEXT: cmp w0, w10
-; CHECK-NEXT: movk w9, #1433, lsl #16
-; CHECK-NEXT: csel w0, w8, w9, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: reject_multiple_usages:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #4369 // =0x1111
+; CHECK-SD-NEXT: mov w9, #3 // =0x3
+; CHECK-SD-NEXT: mov w10, #17 // =0x11
+; CHECK-SD-NEXT: movk w8, #17, lsl #16
+; CHECK-SD-NEXT: mov w11, #12 // =0xc
+; CHECK-SD-NEXT: cmp w0, w8
+; CHECK-SD-NEXT: mov w8, #9 // =0x9
+; CHECK-SD-NEXT: csel w8, w8, w9, eq
+; CHECK-SD-NEXT: csel w9, w11, w10, hi
+; CHECK-SD-NEXT: mov w10, #53312 // =0xd040
+; CHECK-SD-NEXT: movk w10, #2, lsl #16
+; CHECK-SD-NEXT: add w8, w8, w9
+; CHECK-SD-NEXT: mov w9, #26304 // =0x66c0
+; CHECK-SD-NEXT: cmp w0, w10
+; CHECK-SD-NEXT: movk w9, #1433, lsl #16
+; CHECK-SD-NEXT: csel w0, w8, w9, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: reject_multiple_usages:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4369 // =0x1111
+; CHECK-GI-NEXT: mov w9, #3 // =0x3
+; CHECK-GI-NEXT: mov w10, #9 // =0x9
+; CHECK-GI-NEXT: movk w8, #17, lsl #16
+; CHECK-GI-NEXT: mov w11, #12 // =0xc
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: mov w8, #17 // =0x11
+; CHECK-GI-NEXT: csel w9, w10, w9, eq
+; CHECK-GI-NEXT: csel w8, w11, w8, hi
+; CHECK-GI-NEXT: mov w10, #53312 // =0xd040
+; CHECK-GI-NEXT: movk w10, #2, lsl #16
+; CHECK-GI-NEXT: add w8, w9, w8
+; CHECK-GI-NEXT: mov w9, #26304 // =0x66c0
+; CHECK-GI-NEXT: movk w9, #1433, lsl #16
+; CHECK-GI-NEXT: cmp w0, w10
+; CHECK-GI-NEXT: csel w0, w8, w9, hi
+; CHECK-GI-NEXT: ret
%2 = icmp eq i32 %0, 1118481
%3 = icmp ugt i32 %0, 1118481
%4 = select i1 %2, i32 9, i32 3
@@ -629,12 +656,12 @@ define dso_local i32 @neigh_periodic_work_tbl_1() {
; CHECK-NEXT: add x8, x8, :lo12:neigh_periodic_work_tbl_1
; CHECK-NEXT: add x8, x8, #18, lsl #12 // =73728
; CHECK-NEXT: cmn x8, #1272
-; CHECK-NEXT: b.mi .LBB35_2
+; CHECK-NEXT: b.mi .LBB39_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB35_2: // %for.cond
+; CHECK-NEXT: .LBB39_2: // %for.cond
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: b .LBB35_2
+; CHECK-NEXT: b .LBB39_2
entry:
%cmp = icmp slt i64 add (i64 ptrtoint (ptr @neigh_periodic_work_tbl_1 to i64), i64 75000), 0
br i1 %cmp, label %for.cond, label %if.end
@@ -654,15 +681,15 @@ define dso_local i32 @_extract_crng_crng() {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, _extract_crng_crng
; CHECK-NEXT: add x8, x8, :lo12:_extract_crng_crng
-; CHECK-NEXT: tbnz x8, #63, .LBB36_2
+; CHECK-NEXT: tbnz x8, #63, .LBB40_2
; CHECK-NEXT: // %bb.1: // %lor.lhs.false
; CHECK-NEXT: adrp x9, jiffies
; CHECK-NEXT: ldrsw x9, [x9, :lo12:jiffies]
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: add x8, x8, #18, lsl #12 // =73728
; CHECK-NEXT: cmn x8, #1272
-; CHECK-NEXT: b.pl .LBB36_3
-; CHECK-NEXT: .LBB36_2: // %if.then
+; CHECK-NEXT: b.pl .LBB40_3
+; CHECK-NEXT: .LBB40_2: // %if.then
; CHECK-NEXT: adrp x8, primary_crng
; CHECK-NEXT: ldr w8, [x8, :lo12:primary_crng]
; CHECK-NEXT: cmp w8, #0
@@ -670,7 +697,7 @@ define dso_local i32 @_extract_crng_crng() {
; CHECK-NEXT: add x8, x8, :lo12:input_pool
; CHECK-NEXT: csel x0, xzr, x8, eq
; CHECK-NEXT: b crng_reseed
-; CHECK-NEXT: .LBB36_3: // %if.end
+; CHECK-NEXT: .LBB40_3: // %if.end
; CHECK-NEXT: ret
entry:
%cmp2 = icmp slt ptr @_extract_crng_crng, null
@@ -694,11 +721,18 @@ if.end: ; preds = %if.then, %lor.lhs.f
; ((X << C) - Y) + Z --> (Z - Y) + (X << C)
define i32 @commute_subop0(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, lsl #3
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, lsl #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #3
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w8, w2
+; CHECK-GI-NEXT: ret
%shl = shl i32 %x, 3
%sub = sub i32 %shl, %y
%add = add i32 %sub, %z
@@ -707,11 +741,18 @@ define i32 @commute_subop0(i32 %x, i32 %y, i32 %z) {
; ((X >> C) - Y) + Z --> (Z - Y) + (X >> C)
define i32 @commute_subop0_lshr(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_lshr:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, lsr #3
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_lshr:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, lsr #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_lshr:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsr w8, w0, #3
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w8, w2
+; CHECK-GI-NEXT: ret
%lshr = lshr i32 %x, 3
%sub = sub i32 %lshr, %y
%add = add i32 %sub, %z
@@ -720,11 +761,18 @@ define i32 @commute_subop0_lshr(i32 %x, i32 %y, i32 %z) {
; ((X >> C) - Y) + Z --> (Z - Y) + (X >> C)
define i32 @commute_subop0_ashr(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_ashr:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, asr #3
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_ashr:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, asr #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_ashr:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: asr w8, w0, #3
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w8, w2
+; CHECK-GI-NEXT: ret
%ashr = ashr i32 %x, 3
%sub = sub i32 %ashr, %y
%add = add i32 %sub, %z
@@ -733,11 +781,19 @@ define i32 @commute_subop0_ashr(i32 %x, i32 %y, i32 %z) {
; ((sext X) - Y) + Z --> (Z - Y) + (sext X)
define i64 @commute_subop0_sext(i32 %x, i64 %y, i64 %z) {
-; CHECK-LABEL: commute_subop0_sext:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x2, x1
-; CHECK-NEXT: add x0, x8, w0, sxtw
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_sext:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub x8, x2, x1
+; CHECK-SD-NEXT: add x0, x8, w0, sxtw
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_sext:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: sub x8, x8, x1
+; CHECK-GI-NEXT: add x0, x8, x2
+; CHECK-GI-NEXT: ret
%sext = sext i32 %x to i64
%sub = sub i64 %sext, %y
%add = add i64 %sub, %z
@@ -746,11 +802,18 @@ define i64 @commute_subop0_sext(i32 %x, i64 %y, i64 %z) {
; ((sext_inreg X) - Y) + Z --> (Z - Y) + (sext_inreg X)
define i64 @commute_subop0_sext_inreg(i64 %x, i64 %y, i64 %z) {
-; CHECK-LABEL: commute_subop0_sext_inreg:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub x8, x2, x1
-; CHECK-NEXT: add x0, x8, w0, sxth
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_sext_inreg:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub x8, x2, x1
+; CHECK-SD-NEXT: add x0, x8, w0, sxth
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_sext_inreg:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sxth x8, w0
+; CHECK-GI-NEXT: sub x8, x8, x1
+; CHECK-GI-NEXT: add x0, x8, x2
+; CHECK-GI-NEXT: ret
%shl = shl i64 %x, 48
%ashr = ashr i64 %shl, 48
%sub = sub i64 %ashr, %y
@@ -760,11 +823,18 @@ define i64 @commute_subop0_sext_inreg(i64 %x, i64 %y, i64 %z) {
; ((zext X) - Y) + Z --> (Z - Y) + (zext X)
define i32 @commute_subop0_zext(i16 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_zext:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, uxth
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_zext:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, uxth
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_zext:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w8, w2
+; CHECK-GI-NEXT: ret
%zext = zext i16 %x to i32
%sub = sub i32 %zext, %y
%add = add i32 %sub, %z
@@ -774,14 +844,25 @@ define i32 @commute_subop0_zext(i16 %x, i32 %y, i32 %z) {
; ((anyext X) - Y) + Z --> (Z - Y) + (anyext X)
define i8 @commute_subop0_anyext(i16 %a, i16 %b, i32 %c) {
-; CHECK-LABEL: commute_subop0_anyext:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #111 // =0x6f
-; CHECK-NEXT: sub w9, w2, w1
-; CHECK-NEXT: madd w8, w0, w8, w9
-; CHECK-NEXT: lsl w8, w8, #3
-; CHECK-NEXT: sub w0, w8, #1776
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_anyext:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #111 // =0x6f
+; CHECK-SD-NEXT: sub w9, w2, w1
+; CHECK-SD-NEXT: madd w8, w0, w8, w9
+; CHECK-SD-NEXT: lsl w8, w8, #3
+; CHECK-SD-NEXT: sub w0, w8, #1776
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_anyext:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #111 // =0x6f
+; CHECK-GI-NEXT: add w9, w1, #222
+; CHECK-GI-NEXT: mul w8, w0, w8
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: sub w8, w8, w9, uxth
+; CHECK-GI-NEXT: add w8, w8, w2
+; CHECK-GI-NEXT: lsl w0, w8, #3
+; CHECK-GI-NEXT: ret
%aa = mul i16 %a, 111
%bb = add i16 %b, 222
%a_32 = zext i16 %aa to i32
@@ -795,11 +876,18 @@ define i8 @commute_subop0_anyext(i16 %a, i16 %b, i32 %c) {
; ((X and C) - Y) + Z --> (Z - Y) + (X and C)
define i32 @commute_subop0_and(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_and:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, uxtb
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_and:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, uxtb
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_and:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xff
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w8, w2
+; CHECK-GI-NEXT: ret
%and = and i32 %x, 255
%sub = sub i32 %and, %y
%add = add i32 %sub, %z
@@ -808,11 +896,18 @@ define i32 @commute_subop0_and(i32 %x, i32 %y, i32 %z) {
; Z + ((X << C) - Y) --> (Z - Y) + (X << C)
define i32 @commute_subop0_cadd(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_cadd:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w2, w1
-; CHECK-NEXT: add w0, w8, w0, lsl #3
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_cadd:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w2, w1
+; CHECK-SD-NEXT: add w0, w8, w0, lsl #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_cadd:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #3
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w0, w2, w8
+; CHECK-GI-NEXT: ret
%shl = shl i32 %x, 3
%sub = sub i32 %shl, %y
%add = add i32 %z, %sub
@@ -821,11 +916,18 @@ define i32 @commute_subop0_cadd(i32 %x, i32 %y, i32 %z) {
; Y + ((X << C) - X) --> (Y - X) + (X << C)
define i32 @commute_subop0_mul(i32 %x, i32 %y) {
-; CHECK-LABEL: commute_subop0_mul:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub w8, w1, w0
-; CHECK-NEXT: add w0, w8, w0, lsl #3
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_mul:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub w8, w1, w0
+; CHECK-SD-NEXT: add w0, w8, w0, lsl #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_mul:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #3
+; CHECK-GI-NEXT: sub w8, w8, w0
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
%mul = mul i32 %x, 7
%add = add i32 %mul, %y
ret i32 %add
@@ -863,13 +965,22 @@ define i32 @commute_subop0_zshiftc_oneuse(i32 %x, i32 %y, i32 %z) {
}
define i32 @commute_subop0_zshiftc(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: commute_subop0_zshiftc:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w2, #2
-; CHECK-NEXT: sub w9, w8, w1
-; CHECK-NEXT: add w9, w9, w0, lsl #3
-; CHECK-NEXT: eor w0, w8, w9
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: commute_subop0_zshiftc:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w2, #2
+; CHECK-SD-NEXT: sub w9, w8, w1
+; CHECK-SD-NEXT: add w9, w9, w0, lsl #3
+; CHECK-SD-NEXT: eor w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commute_subop0_zshiftc:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #3
+; CHECK-GI-NEXT: lsl w9, w2, #2
+; CHECK-GI-NEXT: sub w8, w8, w1
+; CHECK-GI-NEXT: add w8, w8, w9
+; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: ret
%xshl = shl i32 %x, 3
%sub = sub i32 %xshl, %y
%zshl = shl i32 %z, 2
diff --git a/llvm/test/CodeGen/AArch64/arm64-this-return.ll b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
index a497ba2..7dd47ac 100644
--- a/llvm/test/CodeGen/AArch64/arm64-this-return.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
@@ -148,7 +148,7 @@ define ptr @E_ctor_base(ptr %this, i32 %x) {
; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; GISEL-MIR: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GISEL-MIR: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GISEL-MIR: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; GISEL-MIR: $x0 = COPY [[PTR_ADD]](p0)
; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index b325851..78881c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 937a17c..07400bb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -1,12 +1,50 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=+aes | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for pmull8h
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_2s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_4s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_2d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_4s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2s_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_4s_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2d_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_2s_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_4s_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_2d_strict
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_lane_1s
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_lane_1d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_lane_1d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_dup_low
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_dup_high
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_duplane_low
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_duplane_high
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v4f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f32_1
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v4f32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v4f32_1
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_d
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_pmull_64
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_pmull_high_64
define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: smull8h:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: smull.8h v0, v0, v1
+; CHECK-NEXT: smull v0.8h, v0.8b, v1.8b
; CHECK-NEXT: ret
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
@@ -19,7 +57,7 @@ define <4 x i32> @smull4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: smull.4s v0, v0, v1
+; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -32,7 +70,7 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: smull.2d v0, v0, v1
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -49,7 +87,7 @@ define <8 x i16> @umull8h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: umull.8h v0, v0, v1
+; CHECK-NEXT: umull v0.8h, v0.8b, v1.8b
; CHECK-NEXT: ret
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
@@ -62,7 +100,7 @@ define <4 x i32> @umull4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: umull.4s v0, v0, v1
+; CHECK-NEXT: umull v0.4s, v0.4h, v1.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -75,7 +113,7 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: umull.2d v0, v0, v1
+; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -92,7 +130,7 @@ define <4 x i32> @sqdmull4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqdmull.4s v0, v0, v1
+; CHECK-NEXT: sqdmull v0.4s, v0.4h, v1.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -105,7 +143,7 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqdmull.2d v0, v0, v1
+; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -114,12 +152,19 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind {
}
define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: sqdmull2_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d0, [x0, #8]
-; CHECK-NEXT: ldr d1, [x1, #8]
-; CHECK-NEXT: sqdmull.4s v0, v0, v1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmull2_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0, #8]
+; CHECK-SD-NEXT: ldr d1, [x1, #8]
+; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmull2_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q0, [x0]
+; CHECK-GI-NEXT: ldr q1, [x1]
+; CHECK-GI-NEXT: sqdmull2 v0.4s, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%load1 = load <8 x i16>, ptr %A
%load2 = load <8 x i16>, ptr %B
%tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -129,12 +174,19 @@ define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind {
}
define <2 x i64> @sqdmull2_2d(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: sqdmull2_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d0, [x0, #8]
-; CHECK-NEXT: ldr d1, [x1, #8]
-; CHECK-NEXT: sqdmull.2d v0, v0, v1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmull2_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0, #8]
+; CHECK-SD-NEXT: ldr d1, [x1, #8]
+; CHECK-SD-NEXT: sqdmull v0.2d, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmull2_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q0, [x0]
+; CHECK-GI-NEXT: ldr q1, [x1]
+; CHECK-GI-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%load1 = load <4 x i32>, ptr %A
%load2 = load <4 x i32>, ptr %B
%tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -152,7 +204,7 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: pmull.8h v0, v0, v1
+; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b
; CHECK-NEXT: ret
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
@@ -167,7 +219,7 @@ define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqdmulh.4h v0, v0, v1
+; CHECK-NEXT: sqdmulh v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -180,7 +232,7 @@ define <8 x i16> @sqdmulh_8h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: sqdmulh.8h v0, v0, v1
+; CHECK-NEXT: sqdmulh v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
@@ -193,7 +245,7 @@ define <2 x i32> @sqdmulh_2s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqdmulh.2s v0, v0, v1
+; CHECK-NEXT: sqdmulh v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -206,7 +258,7 @@ define <4 x i32> @sqdmulh_4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: sqdmulh.4s v0, v0, v1
+; CHECK-NEXT: sqdmulh v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
@@ -241,7 +293,7 @@ define <4 x i16> @sqrdmulh_4h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqrdmulh.4h v0, v0, v1
+; CHECK-NEXT: sqrdmulh v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -254,7 +306,7 @@ define <8 x i16> @sqrdmulh_8h(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: sqrdmulh.8h v0, v0, v1
+; CHECK-NEXT: sqrdmulh v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
@@ -267,7 +319,7 @@ define <2 x i32> @sqrdmulh_2s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: sqrdmulh.2s v0, v0, v1
+; CHECK-NEXT: sqrdmulh v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -280,7 +332,7 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: sqrdmulh.4s v0, v0, v1
+; CHECK-NEXT: sqrdmulh v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
@@ -289,15 +341,23 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind {
}
define i32 @sqrdmulh_1s(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: sqrdmulh_1s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr w8, [x0]
-; CHECK-NEXT: ldr w9, [x1]
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: sqrdmulh s0, s0, s1
-; CHECK-NEXT: fmov w0, s0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqrdmulh_1s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr w8, [x0]
+; CHECK-SD-NEXT: ldr w9, [x1]
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: fmov s1, w9
+; CHECK-SD-NEXT: sqrdmulh s0, s0, s1
+; CHECK-SD-NEXT: fmov w0, s0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqrdmulh_1s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr s0, [x0]
+; CHECK-GI-NEXT: ldr s1, [x1]
+; CHECK-GI-NEXT: sqrdmulh s0, s0, s1
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%tmp1 = load i32, ptr %A
%tmp2 = load i32, ptr %B
%tmp3 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %tmp1, i32 %tmp2)
@@ -315,7 +375,7 @@ define <2 x float> @fmulx_2s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: fmulx.2s v0, v0, v1
+; CHECK-NEXT: fmulx v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x float>, ptr %A
%tmp2 = load <2 x float>, ptr %B
@@ -328,7 +388,7 @@ define <4 x float> @fmulx_4s(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: fmulx.4s v0, v0, v1
+; CHECK-NEXT: fmulx v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x float>, ptr %A
%tmp2 = load <4 x float>, ptr %B
@@ -341,7 +401,7 @@ define <2 x double> @fmulx_2d(ptr %A, ptr %B) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: fmulx.2d v0, v0, v1
+; CHECK-NEXT: fmulx v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
%tmp1 = load <2 x double>, ptr %A
%tmp2 = load <2 x double>, ptr %B
@@ -359,7 +419,7 @@ define <4 x i32> @smlal4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: smlal.4s v0, v1, v2
+; CHECK-NEXT: smlal v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -375,7 +435,7 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: smlal.2d v0, v1, v2
+; CHECK-NEXT: smlal v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -386,14 +446,24 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind {
}
define void @smlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-; CHECK-LABEL: smlal8h_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi.16b v3, #1
-; CHECK-NEXT: smlal.8h v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: smlal.8h v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: smlal8h_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v3.16b, #1
+; CHECK-SD-NEXT: smlal v3.8h, v0.8b, v2.8b
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: smlal v3.8h, v1.8b, v0.8b
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: smlal8h_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mvn v3.8b, v2.8b
+; CHECK-GI-NEXT: smull v1.8h, v1.8b, v3.8b
+; CHECK-GI-NEXT: movi v3.16b, #1
+; CHECK-GI-NEXT: smlal v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT: add v0.8h, v1.8h, v3.8h
+; CHECK-GI-NEXT: str q0, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%smull.1 = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %v1, <8 x i8> %v3)
%add.1 = add <8 x i16> %smull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
@@ -404,15 +474,26 @@ define void @smlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <
}
define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: smlal2d_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #257 // =0x101
-; CHECK-NEXT: dup.2d v3, x8
-; CHECK-NEXT: smlal.2d v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: smlal.2d v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: smlal2d_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #257 // =0x101
+; CHECK-SD-NEXT: dup v3.2d, x8
+; CHECK-SD-NEXT: smlal v3.2d, v0.2s, v2.2s
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: smlal v3.2d, v1.2s, v0.2s
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: smlal2d_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mvn v3.8b, v2.8b
+; CHECK-GI-NEXT: adrp x8, .LCPI27_0
+; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s
+; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT: str q0, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <2 x i32> %v3, <i32 -1, i32 -1>
%smull.1 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v1, <2 x i32> %v3)
%add.1 = add <2 x i64> %smull.1, <i64 257, i64 257>
@@ -428,7 +509,7 @@ define <4 x i32> @smlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: smlsl.4s v0, v1, v2
+; CHECK-NEXT: smlsl v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -444,7 +525,7 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: smlsl.2d v0, v1, v2
+; CHECK-NEXT: smlsl v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -457,10 +538,10 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
define void @smlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
; CHECK-LABEL: smlsl8h_chain_with_constant:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi.16b v3, #1
-; CHECK-NEXT: smlsl.8h v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: smlsl.8h v3, v1, v0
+; CHECK-NEXT: movi v3.16b, #1
+; CHECK-NEXT: smlsl v3.8h, v0.8b, v2.8b
+; CHECK-NEXT: mvn v0.8b, v2.8b
+; CHECK-NEXT: smlsl v3.8h, v1.8b, v0.8b
; CHECK-NEXT: str q3, [x0]
; CHECK-NEXT: ret
%xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -473,15 +554,25 @@ define void @smlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <
}
define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: smlsl2d_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #257 // =0x101
-; CHECK-NEXT: dup.2d v3, x8
-; CHECK-NEXT: smlsl.2d v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: smlsl.2d v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: smlsl2d_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #257 // =0x101
+; CHECK-SD-NEXT: dup v3.2d, x8
+; CHECK-SD-NEXT: smlsl v3.2d, v0.2s, v2.2s
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: smlsl v3.2d, v1.2s, v0.2s
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: smlsl2d_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI31_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0]
+; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s
+; CHECK-GI-NEXT: mvn v0.8b, v2.8b
+; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s
+; CHECK-GI-NEXT: str q3, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <2 x i32> %v3, <i32 -1, i32 -1>
%smull.1 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v1, <2 x i32> %v3)
%sub.1 = sub <2 x i64> <i64 257, i64 257>, %smull.1
@@ -502,7 +593,7 @@ define <4 x i32> @sqdmlal4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: sqdmlal.4s v0, v1, v2
+; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -518,7 +609,7 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: sqdmlal.2d v0, v1, v2
+; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -529,13 +620,21 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind {
}
define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
-; CHECK-LABEL: sqdmlal2_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: ldr d1, [x0, #8]
-; CHECK-NEXT: ldr d2, [x1, #8]
-; CHECK-NEXT: sqdmlal.4s v0, v1, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x2]
+; CHECK-SD-NEXT: ldr d1, [x0, #8]
+; CHECK-SD-NEXT: ldr d2, [x1, #8]
+; CHECK-SD-NEXT: sqdmlal v0.4s, v1.4h, v2.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q1, [x0]
+; CHECK-GI-NEXT: ldr q2, [x1]
+; CHECK-GI-NEXT: ldr q0, [x2]
+; CHECK-GI-NEXT: sqdmlal2 v0.4s, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
%load1 = load <8 x i16>, ptr %A
%load2 = load <8 x i16>, ptr %B
%tmp3 = load <4 x i32>, ptr %C
@@ -547,13 +646,21 @@ define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
}
define <2 x i64> @sqdmlal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
-; CHECK-LABEL: sqdmlal2_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: ldr d1, [x0, #8]
-; CHECK-NEXT: ldr d2, [x1, #8]
-; CHECK-NEXT: sqdmlal.2d v0, v1, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x2]
+; CHECK-SD-NEXT: ldr d1, [x0, #8]
+; CHECK-SD-NEXT: ldr d2, [x1, #8]
+; CHECK-SD-NEXT: sqdmlal v0.2d, v1.2s, v2.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q1, [x0]
+; CHECK-GI-NEXT: ldr q2, [x1]
+; CHECK-GI-NEXT: ldr q0, [x2]
+; CHECK-GI-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s
+; CHECK-GI-NEXT: ret
%load1 = load <4 x i32>, ptr %A
%load2 = load <4 x i32>, ptr %B
%tmp3 = load <2 x i64>, ptr %C
@@ -570,7 +677,7 @@ define <4 x i32> @sqdmlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: sqdmlsl.4s v0, v1, v2
+; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -586,7 +693,7 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: sqdmlsl.2d v0, v1, v2
+; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -597,13 +704,21 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
}
define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind {
-; CHECK-LABEL: sqdmlsl2_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: ldr d1, [x0, #8]
-; CHECK-NEXT: ldr d2, [x1, #8]
-; CHECK-NEXT: sqdmlsl.4s v0, v1, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x2]
+; CHECK-SD-NEXT: ldr d1, [x0, #8]
+; CHECK-SD-NEXT: ldr d2, [x1, #8]
+; CHECK-SD-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q1, [x0]
+; CHECK-GI-NEXT: ldr q2, [x1]
+; CHECK-GI-NEXT: ldr q0, [x2]
+; CHECK-GI-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
%load1 = load <8 x i16>, ptr %A
%load2 = load <8 x i16>, ptr %B
%tmp3 = load <4 x i32>, ptr %C
@@ -615,13 +730,21 @@ define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind {
}
define <2 x i64> @sqdmlsl2_2d(ptr %A, ptr %B, ptr %C) nounwind {
-; CHECK-LABEL: sqdmlsl2_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: ldr d1, [x0, #8]
-; CHECK-NEXT: ldr d2, [x1, #8]
-; CHECK-NEXT: sqdmlsl.2d v0, v1, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr q0, [x2]
+; CHECK-SD-NEXT: ldr d1, [x0, #8]
+; CHECK-SD-NEXT: ldr d2, [x1, #8]
+; CHECK-SD-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr q1, [x0]
+; CHECK-GI-NEXT: ldr q2, [x1]
+; CHECK-GI-NEXT: ldr q0, [x2]
+; CHECK-GI-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.4s
+; CHECK-GI-NEXT: ret
%load1 = load <4 x i32>, ptr %A
%load2 = load <4 x i32>, ptr %B
%tmp3 = load <2 x i64>, ptr %C
@@ -638,7 +761,7 @@ define <4 x i32> @umlal4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: umlal.4s v0, v1, v2
+; CHECK-NEXT: umlal v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -654,7 +777,7 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: umlal.2d v0, v1, v2
+; CHECK-NEXT: umlal v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -665,14 +788,24 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind {
}
define void @umlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-; CHECK-LABEL: umlal8h_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi.16b v3, #1
-; CHECK-NEXT: umlal.8h v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: umlal.8h v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlal8h_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v3.16b, #1
+; CHECK-SD-NEXT: umlal v3.8h, v0.8b, v2.8b
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: umlal v3.8h, v1.8b, v0.8b
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlal8h_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mvn v3.8b, v2.8b
+; CHECK-GI-NEXT: umull v1.8h, v1.8b, v3.8b
+; CHECK-GI-NEXT: movi v3.16b, #1
+; CHECK-GI-NEXT: umlal v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT: add v0.8h, v1.8h, v3.8h
+; CHECK-GI-NEXT: str q0, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%umull.1 = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %v1, <8 x i8> %v3)
%add.1 = add <8 x i16> %umull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
@@ -683,15 +816,26 @@ define void @umlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <
}
define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: umlal2d_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #257 // =0x101
-; CHECK-NEXT: dup.2d v3, x8
-; CHECK-NEXT: umlal.2d v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: umlal.2d v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlal2d_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #257 // =0x101
+; CHECK-SD-NEXT: dup v3.2d, x8
+; CHECK-SD-NEXT: umlal v3.2d, v0.2s, v2.2s
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: umlal v3.2d, v1.2s, v0.2s
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlal2d_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mvn v3.8b, v2.8b
+; CHECK-GI-NEXT: adrp x8, .LCPI43_0
+; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s
+; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0]
+; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT: str q0, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <2 x i32> %v3, <i32 -1, i32 -1>
%umull.1 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v1, <2 x i32> %v3)
%add.1 = add <2 x i64> %umull.1, <i64 257, i64 257>
@@ -707,7 +851,7 @@ define <4 x i32> @umlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: umlsl.4s v0, v1, v2
+; CHECK-NEXT: umlsl v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -723,7 +867,7 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: umlsl.2d v0, v1, v2
+; CHECK-NEXT: umlsl v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -736,10 +880,10 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
define void @umlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
; CHECK-LABEL: umlsl8h_chain_with_constant:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi.16b v3, #1
-; CHECK-NEXT: umlsl.8h v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: umlsl.8h v3, v1, v0
+; CHECK-NEXT: movi v3.16b, #1
+; CHECK-NEXT: umlsl v3.8h, v0.8b, v2.8b
+; CHECK-NEXT: mvn v0.8b, v2.8b
+; CHECK-NEXT: umlsl v3.8h, v1.8b, v0.8b
; CHECK-NEXT: str q3, [x0]
; CHECK-NEXT: ret
%xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -752,15 +896,25 @@ define void @umlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <
}
define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: umlsl2d_chain_with_constant:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #257 // =0x101
-; CHECK-NEXT: dup.2d v3, x8
-; CHECK-NEXT: umlsl.2d v3, v0, v2
-; CHECK-NEXT: mvn.8b v0, v2
-; CHECK-NEXT: umlsl.2d v3, v1, v0
-; CHECK-NEXT: str q3, [x0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlsl2d_chain_with_constant:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #257 // =0x101
+; CHECK-SD-NEXT: dup v3.2d, x8
+; CHECK-SD-NEXT: umlsl v3.2d, v0.2s, v2.2s
+; CHECK-SD-NEXT: mvn v0.8b, v2.8b
+; CHECK-SD-NEXT: umlsl v3.2d, v1.2s, v0.2s
+; CHECK-SD-NEXT: str q3, [x0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlsl2d_chain_with_constant:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI47_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s
+; CHECK-GI-NEXT: mvn v0.8b, v2.8b
+; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s
+; CHECK-GI-NEXT: str q3, [x0]
+; CHECK-GI-NEXT: ret
%xor = xor <2 x i32> %v3, <i32 -1, i32 -1>
%umull.1 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v1, <2 x i32> %v3)
%add.1 = sub <2 x i64> <i64 257, i64 257>, %umull.1
@@ -776,7 +930,7 @@ define <2 x float> @fmla_2s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr d0, [x2]
-; CHECK-NEXT: fmla.2s v0, v2, v1
+; CHECK-NEXT: fmla v0.2s, v2.2s, v1.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x float>, ptr %A
%tmp2 = load <2 x float>, ptr %B
@@ -791,7 +945,7 @@ define <4 x float> @fmla_4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmla.4s v0, v2, v1
+; CHECK-NEXT: fmla v0.4s, v2.4s, v1.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x float>, ptr %A
%tmp2 = load <4 x float>, ptr %B
@@ -806,7 +960,7 @@ define <2 x double> @fmla_2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmla.2d v0, v2, v1
+; CHECK-NEXT: fmla v0.2d, v2.2d, v1.2d
; CHECK-NEXT: ret
%tmp1 = load <2 x double>, ptr %A
%tmp2 = load <2 x double>, ptr %B
@@ -825,7 +979,7 @@ define <2 x float> @fmls_2s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr d0, [x2]
-; CHECK-NEXT: fmls.2s v0, v1, v2
+; CHECK-NEXT: fmls v0.2s, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x float>, ptr %A
%tmp2 = load <2 x float>, ptr %B
@@ -841,7 +995,7 @@ define <4 x float> @fmls_4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmls.4s v0, v1, v2
+; CHECK-NEXT: fmls v0.4s, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x float>, ptr %A
%tmp2 = load <4 x float>, ptr %B
@@ -857,7 +1011,7 @@ define <2 x double> @fmls_2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmls.2d v0, v1, v2
+; CHECK-NEXT: fmls v0.2d, v1.2d, v2.2d
; CHECK-NEXT: ret
%tmp1 = load <2 x double>, ptr %A
%tmp2 = load <2 x double>, ptr %B
@@ -873,7 +1027,7 @@ define <2 x float> @fmls_commuted_neg_2s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr d1, [x0]
; CHECK-NEXT: ldr d2, [x1]
; CHECK-NEXT: ldr d0, [x2]
-; CHECK-NEXT: fmls.2s v0, v1, v2
+; CHECK-NEXT: fmls v0.2s, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp1 = load <2 x float>, ptr %A
%tmp2 = load <2 x float>, ptr %B
@@ -889,7 +1043,7 @@ define <4 x float> @fmls_commuted_neg_4s(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmls.4s v0, v1, v2
+; CHECK-NEXT: fmls v0.4s, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp1 = load <4 x float>, ptr %A
%tmp2 = load <4 x float>, ptr %B
@@ -905,7 +1059,7 @@ define <2 x double> @fmls_commuted_neg_2d(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ldr q0, [x2]
-; CHECK-NEXT: fmls.2d v0, v1, v2
+; CHECK-NEXT: fmls v0.2d, v1.2d, v2.2d
; CHECK-NEXT: ret
%tmp1 = load <2 x double>, ptr %A
%tmp2 = load <2 x double>, ptr %B
@@ -919,7 +1073,7 @@ define <2 x float> @fmls_indexed_2s(<2 x float> %a, <2 x float> %b, <2 x float>
; CHECK-LABEL: fmls_indexed_2s:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmls.2s v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.2s, v2.2s, v1.s[0]
; CHECK-NEXT: ret
entry:
%0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %c
@@ -931,7 +1085,7 @@ entry:
define <4 x float> @fmls_indexed_4s(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp {
; CHECK-LABEL: fmls_indexed_4s:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmls.4s v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.4s, v2.4s, v1.s[0]
; CHECK-NEXT: ret
entry:
%0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
@@ -943,7 +1097,7 @@ entry:
define <2 x double> @fmls_indexed_2d(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp {
; CHECK-LABEL: fmls_indexed_2d:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmls.2d v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.2d, v2.2d, v1.d[0]
; CHECK-NEXT: ret
entry:
%0 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
@@ -956,7 +1110,7 @@ define <2 x float> @fmla_indexed_scalar_2s(<2 x float> %a, <2 x float> %b, float
; CHECK-LABEL: fmla_indexed_scalar_2s:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s2 killed $s2 def $d2
-; CHECK-NEXT: fmla.2s v0, v1, v2
+; CHECK-NEXT: fmla v0.2s, v1.2s, v2.2s
; CHECK-NEXT: ret
entry:
%v1 = insertelement <2 x float> undef, float %c, i32 0
@@ -969,7 +1123,7 @@ define <4 x float> @fmla_indexed_scalar_4s(<4 x float> %a, <4 x float> %b, float
; CHECK-LABEL: fmla_indexed_scalar_4s:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2
-; CHECK-NEXT: fmla.4s v0, v1, v2[0]
+; CHECK-NEXT: fmla v0.4s, v1.4s, v2.s[0]
; CHECK-NEXT: ret
entry:
%v1 = insertelement <4 x float> undef, float %c, i32 0
@@ -984,7 +1138,7 @@ define <2 x double> @fmla_indexed_scalar_2d(<2 x double> %a, <2 x double> %b, do
; CHECK-LABEL: fmla_indexed_scalar_2d:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmla.2d v0, v1, v2[0]
+; CHECK-NEXT: fmla v0.2d, v1.2d, v2.d[0]
; CHECK-NEXT: ret
entry:
%v1 = insertelement <2 x double> undef, double %c, i32 0
@@ -997,7 +1151,7 @@ define <2 x float> @fmls_indexed_2s_strict(<2 x float> %a, <2 x float> %b, <2 x
; CHECK-LABEL: fmls_indexed_2s_strict:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmls.2s v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.2s, v2.2s, v1.s[0]
; CHECK-NEXT: ret
entry:
%0 = fneg <2 x float> %c
@@ -1009,7 +1163,7 @@ entry:
define <4 x float> @fmls_indexed_4s_strict(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp strictfp {
; CHECK-LABEL: fmls_indexed_4s_strict:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmls.4s v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.4s, v2.4s, v1.s[0]
; CHECK-NEXT: ret
entry:
%0 = fneg <4 x float> %c
@@ -1021,7 +1175,7 @@ entry:
define <2 x double> @fmls_indexed_2d_strict(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp strictfp {
; CHECK-LABEL: fmls_indexed_2d_strict:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmls.2d v0, v2, v1[0]
+; CHECK-NEXT: fmls v0.2d, v2.2d, v1.d[0]
; CHECK-NEXT: ret
entry:
%0 = fneg <2 x double> %c
@@ -1034,7 +1188,7 @@ define <2 x float> @fmla_indexed_scalar_2s_strict(<2 x float> %a, <2 x float> %b
; CHECK-LABEL: fmla_indexed_scalar_2s_strict:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2
-; CHECK-NEXT: fmla.2s v0, v1, v2[0]
+; CHECK-NEXT: fmla v0.2s, v1.2s, v2.s[0]
; CHECK-NEXT: ret
entry:
%v1 = insertelement <2 x float> undef, float %c, i32 0
@@ -1047,7 +1201,7 @@ define <4 x float> @fmla_indexed_scalar_4s_strict(<4 x float> %a, <4 x float> %b
; CHECK-LABEL: fmla_indexed_scalar_4s_strict:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2
-; CHECK-NEXT: fmla.4s v0, v1, v2[0]
+; CHECK-NEXT: fmla v0.4s, v1.4s, v2.s[0]
; CHECK-NEXT: ret
entry:
%v1 = insertelement <4 x float> undef, float %c, i32 0
@@ -1062,7 +1216,7 @@ define <2 x double> @fmla_indexed_scalar_2d_strict(<2 x double> %a, <2 x double>
; CHECK-LABEL: fmla_indexed_scalar_2d_strict:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmla.2d v0, v1, v2[0]
+; CHECK-NEXT: fmla v0.2d, v1.2d, v2.d[0]
; CHECK-NEXT: ret
entry:
%v1 = insertelement <2 x double> undef, double %c, i32 0
@@ -1081,7 +1235,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: mul_4h:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mul.4h v0, v0, v1[1]
+; CHECK-NEXT: mul v0.4h, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = mul <4 x i16> %A, %tmp3
@@ -1091,7 +1245,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-LABEL: mul_8h:
; CHECK: // %bb.0:
-; CHECK-NEXT: mul.8h v0, v0, v1[1]
+; CHECK-NEXT: mul v0.8h, v0.8h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%tmp4 = mul <8 x i16> %A, %tmp3
@@ -1102,7 +1256,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: mul_2s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mul.2s v0, v0, v1[1]
+; CHECK-NEXT: mul v0.2s, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = mul <2 x i32> %A, %tmp3
@@ -1112,7 +1266,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: mul_4s:
; CHECK: // %bb.0:
-; CHECK-NEXT: mul.4s v0, v0, v1[1]
+; CHECK-NEXT: mul v0.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = mul <4 x i32> %A, %tmp3
@@ -1120,17 +1274,29 @@ define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind {
}
define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind {
-; CHECK-LABEL: mul_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fmov x10, d1
-; CHECK-NEXT: fmov x11, d0
-; CHECK-NEXT: mov.d x8, v1[1]
-; CHECK-NEXT: mov.d x9, v0[1]
-; CHECK-NEXT: mul x10, x11, x10
-; CHECK-NEXT: mul x8, x9, x8
-; CHECK-NEXT: fmov d0, x10
-; CHECK-NEXT: mov.d v0[1], x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mul_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fmov x10, d1
+; CHECK-SD-NEXT: fmov x11, d0
+; CHECK-SD-NEXT: mov x8, v1.d[1]
+; CHECK-SD-NEXT: mov x9, v0.d[1]
+; CHECK-SD-NEXT: mul x10, x11, x10
+; CHECK-SD-NEXT: mul x8, x9, x8
+; CHECK-SD-NEXT: fmov d0, x10
+; CHECK-SD-NEXT: mov v0.d[1], x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mul_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x10, d0
+; CHECK-GI-NEXT: fmov x11, d1
+; CHECK-GI-NEXT: mov x8, v0.d[1]
+; CHECK-GI-NEXT: mov x9, v1.d[1]
+; CHECK-GI-NEXT: mul x10, x10, x11
+; CHECK-GI-NEXT: mul x8, x8, x9
+; CHECK-GI-NEXT: fmov d0, x10
+; CHECK-GI-NEXT: mov v0.d[1], x8
+; CHECK-GI-NEXT: ret
%tmp1 = mul <2 x i64> %A, %B
ret <2 x i64> %tmp1
}
@@ -1139,7 +1305,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind {
; CHECK-LABEL: fmul_lane_2s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmul.2s v0, v0, v1[1]
+; CHECK-NEXT: fmul v0.2s, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x float> %B, <2 x float> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = fmul <2 x float> %A, %tmp3
@@ -1149,7 +1315,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind {
define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind {
; CHECK-LABEL: fmul_lane_4s:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmul.4s v0, v0, v1[1]
+; CHECK-NEXT: fmul v0.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x float> %B, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = fmul <4 x float> %A, %tmp3
@@ -1159,7 +1325,7 @@ define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind {
define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind {
; CHECK-LABEL: fmul_lane_2d:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmul.2d v0, v0, v1[1]
+; CHECK-NEXT: fmul v0.2d, v0.2d, v1.d[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x double> %B, <2 x double> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = fmul <2 x double> %A, %tmp3
@@ -1169,7 +1335,7 @@ define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind {
define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind {
; CHECK-LABEL: fmul_lane_s:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmul.s s0, s0, v1[3]
+; CHECK-NEXT: fmul s0, s0, v1.s[3]
; CHECK-NEXT: ret
%B = extractelement <4 x float> %vec, i32 3
%res = fmul float %A, %B
@@ -1179,7 +1345,7 @@ define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind {
define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind {
; CHECK-LABEL: fmul_lane_d:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmul.d d0, d0, v1[1]
+; CHECK-NEXT: fmul d0, d0, v1.d[1]
; CHECK-NEXT: ret
%B = extractelement <2 x double> %vec, i32 1
%res = fmul double %A, %B
@@ -1192,7 +1358,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind {
; CHECK-LABEL: fmulx_lane_2s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmulx.2s v0, v0, v1[1]
+; CHECK-NEXT: fmulx v0.2s, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x float> %B, <2 x float> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %A, <2 x float> %tmp3)
@@ -1202,7 +1368,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind {
define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind {
; CHECK-LABEL: fmulx_lane_4s:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmulx.4s v0, v0, v1[1]
+; CHECK-NEXT: fmulx v0.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x float> %B, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %A, <4 x float> %tmp3)
@@ -1212,7 +1378,7 @@ define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind {
define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind {
; CHECK-LABEL: fmulx_lane_2d:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmulx.2d v0, v0, v1[1]
+; CHECK-NEXT: fmulx v0.2d, v0.2d, v1.d[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x double> %B, <2 x double> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %A, <2 x double> %tmp3)
@@ -1223,7 +1389,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: sqdmulh_lane_4h:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmulh.4h v0, v0, v1[1]
+; CHECK-NEXT: sqdmulh v0.4h, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %A, <4 x i16> %tmp3)
@@ -1233,7 +1399,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-LABEL: sqdmulh_lane_8h:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmulh.8h v0, v0, v1[1]
+; CHECK-NEXT: sqdmulh v0.8h, v0.8h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %A, <8 x i16> %tmp3)
@@ -1244,7 +1410,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: sqdmulh_lane_2s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmulh.2s v0, v0, v1[1]
+; CHECK-NEXT: sqdmulh v0.2s, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %A, <2 x i32> %tmp3)
@@ -1254,7 +1420,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: sqdmulh_lane_4s:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmulh.4s v0, v0, v1[1]
+; CHECK-NEXT: sqdmulh v0.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %A, <4 x i32> %tmp3)
@@ -1265,7 +1431,7 @@ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: sqdmulh_lane_1s:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s1, w0
-; CHECK-NEXT: sqdmulh.s s0, s1, v0[1]
+; CHECK-NEXT: sqdmulh s0, s1, v0.s[1]
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
%tmp1 = extractelement <4 x i32> %B, i32 1
@@ -1277,7 +1443,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: sqrdmulh_lane_4h:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqrdmulh.4h v0, v0, v1[1]
+; CHECK-NEXT: sqrdmulh v0.4h, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %A, <4 x i16> %tmp3)
@@ -1287,7 +1453,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind {
define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-LABEL: sqrdmulh_lane_8h:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqrdmulh.8h v0, v0, v1[1]
+; CHECK-NEXT: sqrdmulh v0.8h, v0.8h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %A, <8 x i16> %tmp3)
@@ -1298,7 +1464,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: sqrdmulh_lane_2s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqrdmulh.2s v0, v0, v1[1]
+; CHECK-NEXT: sqrdmulh v0.2s, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %A, <2 x i32> %tmp3)
@@ -1308,7 +1474,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind {
define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: sqrdmulh_lane_4s:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqrdmulh.4s v0, v0, v1[1]
+; CHECK-NEXT: sqrdmulh v0.4s, v0.4s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %A, <4 x i32> %tmp3)
@@ -1319,7 +1485,7 @@ define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
; CHECK-LABEL: sqrdmulh_lane_1s:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s1, w0
-; CHECK-NEXT: sqrdmulh.s s0, s1, v0[1]
+; CHECK-NEXT: sqrdmulh s0, s1, v0.s[1]
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
%tmp1 = extractelement <4 x i32> %B, i32 1
@@ -1331,7 +1497,7 @@ define <4 x i32> @sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: sqdmull_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmull.4s v0, v0, v1[1]
+; CHECK-NEXT: sqdmull v0.4s, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp3)
@@ -1342,7 +1508,7 @@ define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: sqdmull_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmull.2d v0, v0, v1[1]
+; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp3)
@@ -1350,10 +1516,16 @@ define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind {
}
define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: sqdmull2_lane_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull2.4s v0, v0, v1[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmull2_lane_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmull2 v0.4s, v0.8h, v1.h[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmull2_lane_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.h[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1361,10 +1533,16 @@ define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind {
}
define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: sqdmull2_lane_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull2.2d v0, v0, v1[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmull2_lane_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmull2_lane_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.s[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1375,7 +1553,7 @@ define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: umull_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umull.4s v0, v0, v1[1]
+; CHECK-NEXT: umull v0.4s, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp3)
@@ -1386,7 +1564,7 @@ define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: umull_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umull.2d v0, v0, v1[1]
+; CHECK-NEXT: umull v0.2d, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp3)
@@ -1397,7 +1575,7 @@ define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind {
; CHECK-LABEL: smull_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smull.4s v0, v0, v1[1]
+; CHECK-NEXT: smull v0.4s, v0.4h, v1.h[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp3)
@@ -1408,7 +1586,7 @@ define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind {
; CHECK-LABEL: smull_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smull.2d v0, v0, v1[1]
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.s[1]
; CHECK-NEXT: ret
%tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp3)
@@ -1419,8 +1597,8 @@ define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
; CHECK-LABEL: smlal_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smlal.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: smlal v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1432,8 +1610,8 @@ define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
; CHECK-LABEL: smlal_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smlal.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: smlal v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1445,8 +1623,8 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun
; CHECK-LABEL: sqdmlal_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmlal.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: sqdmlal v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1458,8 +1636,8 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun
; CHECK-LABEL: sqdmlal_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmlal.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: sqdmlal v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1468,11 +1646,18 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun
}
define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nounwind {
-; CHECK-LABEL: sqdmlal2_lane_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_lane_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlal2 v2.4s, v0.8h, v1.h[1]
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_lane_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d3, v0.d[1]
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: sqdmlal v0.4s, v3.4h, v1.h[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1481,11 +1666,18 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou
}
define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nounwind {
-; CHECK-LABEL: sqdmlal2_lane_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_lane_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlal2 v2.2d, v0.4s, v1.s[1]
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_lane_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d3, v0.d[1]
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: sqdmlal v0.2d, v3.2s, v1.s[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1499,7 +1691,7 @@ define i32 @sqdmlal_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind {
; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: fmov s2, w0
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: sqdmlal.h s2, h1, v0[1]
+; CHECK-NEXT: sqdmlal s2, h1, v0.h[1]
; CHECK-NEXT: fmov w0, s2
; CHECK-NEXT: ret
%lhs = insertelement <4 x i16> undef, i16 %B, i32 0
@@ -1517,7 +1709,7 @@ define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind {
; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: fmov s2, w0
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: sqdmlsl.h s2, h1, v0[1]
+; CHECK-NEXT: sqdmlsl s2, h1, v0.h[1]
; CHECK-NEXT: fmov w0, s2
; CHECK-NEXT: ret
%lhs = insertelement <4 x i16> undef, i16 %B, i32 0
@@ -1530,15 +1722,24 @@ define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind {
declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32)
define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind {
-; CHECK-LABEL: sqadd_lane1_sqdmull4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull.4s v0, v0, v1
-; CHECK-NEXT: mov.s w8, v0[1]
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: fmov s1, w8
-; CHECK-NEXT: sqadd s0, s0, s1
-; CHECK-NEXT: fmov w0, s0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqadd_lane1_sqdmull4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: fmov s1, w8
+; CHECK-SD-NEXT: sqadd s0, s0, s1
+; CHECK-SD-NEXT: fmov w0, s0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqadd_lane1_sqdmull4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.4h
+; CHECK-GI-NEXT: fmov s1, w0
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: sqadd s0, s1, s0
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %B, <4 x i16> %C)
%prod = extractelement <4 x i32> %prod.vec, i32 1
%res = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %A, i32 %prod)
@@ -1546,15 +1747,24 @@ define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind {
}
define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind {
-; CHECK-LABEL: sqsub_lane1_sqdmull4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull.4s v0, v0, v1
-; CHECK-NEXT: mov.s w8, v0[1]
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: fmov s1, w8
-; CHECK-NEXT: sqsub s0, s0, s1
-; CHECK-NEXT: fmov w0, s0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqsub_lane1_sqdmull4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: fmov s1, w8
+; CHECK-SD-NEXT: sqsub s0, s0, s1
+; CHECK-SD-NEXT: fmov w0, s0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqsub_lane1_sqdmull4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.4h
+; CHECK-GI-NEXT: fmov s1, w0
+; CHECK-GI-NEXT: mov s0, v0.s[1]
+; CHECK-GI-NEXT: sqsub s0, s1, s0
+; CHECK-GI-NEXT: fmov w0, s0
+; CHECK-GI-NEXT: ret
%prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %B, <4 x i16> %C)
%prod = extractelement <4 x i32> %prod.vec, i32 1
%res = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %A, i32 %prod)
@@ -1567,7 +1777,7 @@ define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: fmov s2, w1
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: sqdmlal.s d1, s2, v0[1]
+; CHECK-NEXT: sqdmlal d1, s2, v0.s[1]
; CHECK-NEXT: fmov x0, d1
; CHECK-NEXT: ret
%rhs = extractelement <2 x i32> %C, i32 1
@@ -1584,7 +1794,7 @@ define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: fmov s2, w1
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: sqdmlsl.s d1, s2, v0[1]
+; CHECK-NEXT: sqdmlsl d1, s2, v0.s[1]
; CHECK-NEXT: fmov x0, d1
; CHECK-NEXT: ret
%rhs = extractelement <2 x i32> %C, i32 1
@@ -1599,8 +1809,8 @@ define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
; CHECK-LABEL: umlal_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umlal.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: umlal v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1612,8 +1822,8 @@ define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
; CHECK-LABEL: umlal_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umlal.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: umlal v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1626,8 +1836,8 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
; CHECK-LABEL: smlsl_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smlsl.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: smlsl v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1639,8 +1849,8 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
; CHECK-LABEL: smlsl_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: smlsl.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: smlsl v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1652,8 +1862,8 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun
; CHECK-LABEL: sqdmlsl_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmlsl.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: sqdmlsl v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1665,8 +1875,8 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun
; CHECK-LABEL: sqdmlsl_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: sqdmlsl.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: sqdmlsl v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1675,11 +1885,18 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun
}
define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nounwind {
-; CHECK-LABEL: sqdmlsl2_lane_4s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_lane_4s:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlsl2 v2.4s, v0.8h, v1.h[1]
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_lane_4s:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d3, v0.d[1]
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: sqdmlsl v0.4s, v3.4h, v1.h[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1688,11 +1905,18 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou
}
define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nounwind {
-; CHECK-LABEL: sqdmlsl2_lane_2d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_lane_2d:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlsl2 v2.2d, v0.4s, v1.s[1]
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_lane_2d:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d3, v0.d[1]
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: sqdmlsl v0.2d, v3.2s, v1.s[1]
+; CHECK-GI-NEXT: ret
%tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1704,8 +1928,8 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
; CHECK-LABEL: umlsl_lane_4s:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umlsl.4s v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: umlsl v2.4s, v0.4h, v1.h[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp4)
@@ -1717,8 +1941,8 @@ define <2 x i64> @umlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
; CHECK-LABEL: umlsl_lane_2d:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: umlsl.2d v2, v0, v1[1]
-; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: umlsl v2.2d, v0.2s, v1.s[1]
+; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp4)
@@ -1748,7 +1972,7 @@ define double @fmulxd(double %a, double %b) nounwind {
define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind {
; CHECK-LABEL: fmulxs_lane:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmulx.s s0, s0, v1[3]
+; CHECK-NEXT: fmulx s0, s0, v1.s[3]
; CHECK-NEXT: ret
%b = extractelement <4 x float> %vec, i32 3
%fmulx.i = tail call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) nounwind
@@ -1758,7 +1982,7 @@ define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind {
define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind {
; CHECK-LABEL: fmulxd_lane:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmulx.d d0, d0, v1[1]
+; CHECK-NEXT: fmulx d0, d0, v1.d[1]
; CHECK-NEXT: ret
%b = extractelement <2 x double> %vec, i32 1
%fmulx.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %a, double %b) nounwind
@@ -1772,7 +1996,7 @@ declare float @llvm.aarch64.neon.fmulx.f32(float, float) nounwind readnone
define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind {
; CHECK-LABEL: smull2_8h_simple:
; CHECK: // %bb.0:
-; CHECK-NEXT: smull2.8h v0, v0, v1
+; CHECK-NEXT: smull2 v0.8h, v0.16b, v1.16b
; CHECK-NEXT: ret
%1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1783,7 +2007,7 @@ define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind {
define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind {
; CHECK-LABEL: foo0:
; CHECK: // %bb.0:
-; CHECK-NEXT: smull2.8h v0, v0, v1
+; CHECK-NEXT: smull2 v0.8h, v0.16b, v1.16b
; CHECK-NEXT: ret
%tmp = bitcast <16 x i8> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1798,7 +2022,7 @@ define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind {
define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind {
; CHECK-LABEL: foo1:
; CHECK: // %bb.0:
-; CHECK-NEXT: smull2.4s v0, v0, v1
+; CHECK-NEXT: smull2 v0.4s, v0.8h, v1.8h
; CHECK-NEXT: ret
%tmp = bitcast <8 x i16> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1813,7 +2037,7 @@ define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind {
define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind {
; CHECK-LABEL: foo2:
; CHECK: // %bb.0:
-; CHECK-NEXT: smull2.2d v0, v0, v1
+; CHECK-NEXT: smull2 v0.2d, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp = bitcast <4 x i32> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1828,7 +2052,7 @@ define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind {
; CHECK-LABEL: foo3:
; CHECK: // %bb.0:
-; CHECK-NEXT: umull2.8h v0, v0, v1
+; CHECK-NEXT: umull2 v0.8h, v0.16b, v1.16b
; CHECK-NEXT: ret
%tmp = bitcast <16 x i8> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1843,7 +2067,7 @@ define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind {
define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind {
; CHECK-LABEL: foo4:
; CHECK: // %bb.0:
-; CHECK-NEXT: umull2.4s v0, v0, v1
+; CHECK-NEXT: umull2 v0.4s, v0.8h, v1.8h
; CHECK-NEXT: ret
%tmp = bitcast <8 x i16> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1858,7 +2082,7 @@ define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind {
define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind {
; CHECK-LABEL: foo5:
; CHECK: // %bb.0:
-; CHECK-NEXT: umull2.2d v0, v0, v1
+; CHECK-NEXT: umull2 v0.2d, v0.4s, v1.4s
; CHECK-NEXT: ret
%tmp = bitcast <4 x i32> %a to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1871,11 +2095,18 @@ define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind {
}
define <4 x i32> @foo6(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
-; CHECK-LABEL: foo6:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smull2.4s v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo6:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: smull2 v0.4s, v1.8h, v2.h[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo6:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: smull v0.4s, v0.4h, v2.h[1]
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1889,7 +2120,7 @@ define <4 x i32> @foo6a(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readn
; CHECK-LABEL: foo6a:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smull.4s v0, v1, v2[1]
+; CHECK-NEXT: smull v0.4s, v1.4h, v2.h[1]
; CHECK-NEXT: ret
entry:
%0 = bitcast <8 x i16> %b to <2 x i64>
@@ -1901,11 +2132,18 @@ entry:
}
define <2 x i64> @foo7(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
-; CHECK-LABEL: foo7:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smull2.2d v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo7:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: smull2 v0.2d, v1.4s, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo7:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: smull v0.2d, v0.2s, v2.s[1]
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1919,7 +2157,7 @@ define <2 x i64> @foo7a(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readn
; CHECK-LABEL: foo7a:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smull.2d v0, v1, v2[1]
+; CHECK-NEXT: smull v0.2d, v1.2s, v2.s[1]
; CHECK-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
@@ -1932,11 +2170,18 @@ entry:
define <4 x i32> @foo8(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
-; CHECK-LABEL: foo8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umull2.4s v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo8:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: umull2 v0.4s, v1.8h, v2.h[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo8:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: umull v0.4s, v0.4h, v2.h[1]
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1950,7 +2195,7 @@ define <4 x i32> @foo8a(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readn
; CHECK-LABEL: foo8a:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umull.4s v0, v1, v2[1]
+; CHECK-NEXT: umull v0.4s, v1.4h, v2.h[1]
; CHECK-NEXT: ret
entry:
%0 = bitcast <8 x i16> %b to <2 x i64>
@@ -1962,11 +2207,18 @@ entry:
}
define <2 x i64> @foo9(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
-; CHECK-LABEL: foo9:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umull2.2d v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo9:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: umull2 v0.2d, v1.4s, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo9:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: umull v0.2d, v0.2s, v2.s[1]
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -1980,7 +2232,7 @@ define <2 x i64> @foo9a(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readn
; CHECK-LABEL: foo9a:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umull.2d v0, v1, v2[1]
+; CHECK-NEXT: umull v0.2d, v1.2s, v2.s[1]
; CHECK-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
@@ -1994,7 +2246,7 @@ entry:
define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
; CHECK-LABEL: bar0:
; CHECK: // %bb.0:
-; CHECK-NEXT: smlal2.8h v0, v1, v2
+; CHECK-NEXT: smlal2 v0.8h, v1.16b, v2.16b
; CHECK-NEXT: ret
%tmp = bitcast <16 x i8> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2010,7 +2262,7 @@ define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
; CHECK-LABEL: bar1:
; CHECK: // %bb.0:
-; CHECK-NEXT: smlal2.4s v0, v1, v2
+; CHECK-NEXT: smlal2 v0.4s, v1.8h, v2.8h
; CHECK-NEXT: ret
%tmp = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2026,7 +2278,7 @@ define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
; CHECK-LABEL: bar2:
; CHECK: // %bb.0:
-; CHECK-NEXT: smlal2.2d v0, v1, v2
+; CHECK-NEXT: smlal2 v0.2d, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2042,7 +2294,7 @@ define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
; CHECK-LABEL: bar3:
; CHECK: // %bb.0:
-; CHECK-NEXT: umlal2.8h v0, v1, v2
+; CHECK-NEXT: umlal2 v0.8h, v1.16b, v2.16b
; CHECK-NEXT: ret
%tmp = bitcast <16 x i8> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2058,7 +2310,7 @@ define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
; CHECK-LABEL: bar4:
; CHECK: // %bb.0:
-; CHECK-NEXT: umlal2.4s v0, v1, v2
+; CHECK-NEXT: umlal2 v0.4s, v1.8h, v2.8h
; CHECK-NEXT: ret
%tmp = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2074,7 +2326,7 @@ define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
; CHECK-LABEL: bar5:
; CHECK: // %bb.0:
-; CHECK-NEXT: umlal2.2d v0, v1, v2
+; CHECK-NEXT: umlal2 v0.2d, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2088,11 +2340,18 @@ define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
}
define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
-; CHECK-LABEL: mlal2_1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smlal2.4s v0, v1, v2[3]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mlal2_1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: smlal2 v0.4s, v1.8h, v2.h[3]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mlal2_1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: dup v2.8h, v2.h[3]
+; CHECK-GI-NEXT: smlal2 v0.4s, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
%shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%tmp = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2106,11 +2365,18 @@ define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
}
define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind {
-; CHECK-LABEL: mlal2_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: smlal2.2d v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mlal2_2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: smlal2 v0.2d, v1.4s, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mlal2_2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: dup v2.4s, v2.s[1]
+; CHECK-GI-NEXT: smlal2 v0.2d, v1.4s, v2.4s
+; CHECK-GI-NEXT: ret
%shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%tmp = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2124,11 +2390,18 @@ define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind {
}
define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
-; CHECK-LABEL: mlal2_4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umlal2.4s v0, v1, v2[2]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mlal2_4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: umlal2 v0.4s, v1.8h, v2.h[2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mlal2_4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: dup v2.8h, v2.h[2]
+; CHECK-GI-NEXT: umlal2 v0.4s, v1.8h, v2.8h
+; CHECK-GI-NEXT: ret
%shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%tmp = bitcast <8 x i16> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2142,11 +2415,18 @@ define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
}
define <2 x i64> @mlal2_5(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind {
-; CHECK-LABEL: mlal2_5:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: umlal2.2d v0, v1, v2[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mlal2_5:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: umlal2 v0.2d, v1.4s, v2.s[0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mlal2_5:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: dup v2.4s, v2.s[0]
+; CHECK-GI-NEXT: umlal2 v0.2d, v1.4s, v2.4s
+; CHECK-GI-NEXT: ret
%shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> zeroinitializer
%tmp = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2164,7 +2444,7 @@ define <2 x double> @vmulq_n_f64(<2 x double> %x, double %y) nounwind readnone s
; CHECK-LABEL: vmulq_n_f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: fmul.2d v0, v0, v1[0]
+; CHECK-NEXT: fmul v0.2d, v0.2d, v1.d[0]
; CHECK-NEXT: ret
entry:
%vecinit.i = insertelement <2 x double> undef, double %y, i32 0
@@ -2177,7 +2457,7 @@ define <4 x float> @vmulq_n_f32(<4 x float> %x, float %y) nounwind readnone ssp
; CHECK-LABEL: vmulq_n_f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1
-; CHECK-NEXT: fmul.4s v0, v0, v1[0]
+; CHECK-NEXT: fmul v0.4s, v0.4s, v1.s[0]
; CHECK-NEXT: ret
entry:
%vecinit.i = insertelement <4 x float> undef, float %y, i32 0
@@ -2192,7 +2472,7 @@ define <2 x float> @vmul_n_f32(<2 x float> %x, float %y) nounwind readnone ssp {
; CHECK-LABEL: vmul_n_f32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1
-; CHECK-NEXT: fmul.2s v0, v0, v1[0]
+; CHECK-NEXT: fmul v0.2s, v0.2s, v1.s[0]
; CHECK-NEXT: ret
entry:
%vecinit.i = insertelement <2 x float> undef, float %y, i32 0
@@ -2204,7 +2484,7 @@ entry:
define <4 x i16> @vmla_laneq_s16_test(<4 x i16> %a, <4 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
; CHECK-LABEL: vmla_laneq_s16_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mla.4h v0, v1, v2[6]
+; CHECK-NEXT: mla v0.4h, v1.4h, v2.h[6]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
@@ -2216,7 +2496,7 @@ entry:
define <2 x i32> @vmla_laneq_s32_test(<2 x i32> %a, <2 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
; CHECK-LABEL: vmla_laneq_s32_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mla.2s v0, v1, v2[3]
+; CHECK-NEXT: mla v0.2s, v1.2s, v2.s[3]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
@@ -2226,10 +2506,16 @@ entry:
}
define <8 x i16> @not_really_vmlaq_laneq_s16_test(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
-; CHECK-LABEL: not_really_vmlaq_laneq_s16_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mla.8h v0, v1, v2[5]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: not_really_vmlaq_laneq_s16_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mla v0.8h, v1.8h, v2.h[5]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: not_really_vmlaq_laneq_s16_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ext v2.16b, v2.16b, v0.16b, #8
+; CHECK-GI-NEXT: mla v0.8h, v1.8h, v2.h[1]
+; CHECK-GI-NEXT: ret
entry:
%shuffle1 = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%shuffle2 = shufflevector <4 x i16> %shuffle1, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -2239,10 +2525,16 @@ entry:
}
define <4 x i32> @not_really_vmlaq_laneq_s32_test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
-; CHECK-LABEL: not_really_vmlaq_laneq_s32_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mla.4s v0, v1, v2[3]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: not_really_vmlaq_laneq_s32_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mla v0.4s, v1.4s, v2.s[3]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: not_really_vmlaq_laneq_s32_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ext v2.16b, v2.16b, v0.16b, #8
+; CHECK-GI-NEXT: mla v0.4s, v1.4s, v2.s[1]
+; CHECK-GI-NEXT: ret
entry:
%shuffle1 = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%shuffle2 = shufflevector <2 x i32> %shuffle1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -2254,7 +2546,7 @@ entry:
define <4 x i32> @vmull_laneq_s16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
; CHECK-LABEL: vmull_laneq_s16_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: smull.4s v0, v0, v1[6]
+; CHECK-NEXT: smull v0.4s, v0.4h, v1.h[6]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
@@ -2265,7 +2557,7 @@ entry:
define <2 x i64> @vmull_laneq_s32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
; CHECK-LABEL: vmull_laneq_s32_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: smull.2d v0, v0, v1[2]
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.s[2]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2>
@@ -2275,7 +2567,7 @@ entry:
define <4 x i32> @vmull_laneq_u16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
; CHECK-LABEL: vmull_laneq_u16_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: umull.4s v0, v0, v1[6]
+; CHECK-NEXT: umull v0.4s, v0.4h, v1.h[6]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
@@ -2286,7 +2578,7 @@ entry:
define <2 x i64> @vmull_laneq_u32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
; CHECK-LABEL: vmull_laneq_u32_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: umull.2d v0, v0, v1[2]
+; CHECK-NEXT: umull v0.2d, v0.2s, v1.s[2]
; CHECK-NEXT: ret
entry:
%shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2>
@@ -2297,8 +2589,8 @@ entry:
define <4 x i32> @vmull_low_n_s16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp {
; CHECK-LABEL: vmull_low_n_s16_test:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: dup.4h v0, w0
-; CHECK-NEXT: smull.4s v0, v1, v0
+; CHECK-NEXT: dup v0.4h, w0
+; CHECK-NEXT: smull v0.4s, v1.4h, v0.4h
; CHECK-NEXT: ret
entry:
%conv = trunc i32 %d to i16
@@ -2314,11 +2606,18 @@ entry:
}
define <4 x i32> @vmull_high_n_s16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp {
-; CHECK-LABEL: vmull_high_n_s16_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: dup.8h v0, w0
-; CHECK-NEXT: smull2.4s v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmull_high_n_s16_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v0.8h, w0
+; CHECK-SD-NEXT: smull2 v0.4s, v1.8h, v0.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmull_high_n_s16_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: dup v1.4h, w0
+; CHECK-GI-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
entry:
%conv = trunc i32 %d to i16
%0 = bitcast <8 x i16> %b to <2 x i64>
@@ -2333,11 +2632,18 @@ entry:
}
define <2 x i64> @vmull_high_n_s32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp {
-; CHECK-LABEL: vmull_high_n_s32_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: dup.4s v0, w0
-; CHECK-NEXT: smull2.2d v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmull_high_n_s32_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v0.4s, w0
+; CHECK-SD-NEXT: smull2 v0.2d, v1.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmull_high_n_s32_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: dup v1.2s, w0
+; CHECK-GI-NEXT: smull v0.2d, v0.2s, v1.2s
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2349,11 +2655,18 @@ entry:
}
define <4 x i32> @vmull_high_n_u16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp {
-; CHECK-LABEL: vmull_high_n_u16_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: dup.8h v0, w0
-; CHECK-NEXT: umull2.4s v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmull_high_n_u16_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v0.8h, w0
+; CHECK-SD-NEXT: umull2 v0.4s, v1.8h, v0.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmull_high_n_u16_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: dup v1.4h, w0
+; CHECK-GI-NEXT: umull v0.4s, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
entry:
%conv = trunc i32 %d to i16
%0 = bitcast <8 x i16> %b to <2 x i64>
@@ -2368,11 +2681,18 @@ entry:
}
define <2 x i64> @vmull_high_n_u32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp {
-; CHECK-LABEL: vmull_high_n_u32_test:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: dup.4s v0, w0
-; CHECK-NEXT: umull2.2d v0, v1, v0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmull_high_n_u32_test:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: dup v0.4s, w0
+; CHECK-SD-NEXT: umull2 v0.2d, v1.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmull_high_n_u32_test:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov d0, v1.d[1]
+; CHECK-GI-NEXT: dup v1.2s, w0
+; CHECK-GI-NEXT: umull v0.2d, v0.2s, v1.2s
+; CHECK-GI-NEXT: ret
entry:
%0 = bitcast <4 x i32> %b to <2 x i64>
%shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
@@ -2384,10 +2704,17 @@ entry:
}
define <4 x i32> @vmul_built_dup_test(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-LABEL: vmul_built_dup_test:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mul.4s v0, v0, v1[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmul_built_dup_test:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mul v0.4s, v0.4s, v1.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmul_built_dup_test:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov s1, v1.s[1]
+; CHECK-GI-NEXT: dup v1.4s, v1.s[0]
+; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%vget_lane = extractelement <4 x i32> %b, i32 1
%vecinit.i = insertelement <4 x i32> undef, i32 %vget_lane, i32 0
%vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %vget_lane, i32 1
@@ -2398,11 +2725,19 @@ define <4 x i32> @vmul_built_dup_test(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i16> @vmul_built_dup_fromsmall_test(<4 x i16> %a, <4 x i16> %b) {
-; CHECK-LABEL: vmul_built_dup_fromsmall_test:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mul.4h v0, v0, v1[3]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmul_built_dup_fromsmall_test:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: mul v0.4h, v0.4h, v1.h[3]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmul_built_dup_fromsmall_test:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h1, v1.h[3]
+; CHECK-GI-NEXT: dup v1.4h, v1.h[0]
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
%vget_lane = extractelement <4 x i16> %b, i32 3
%vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
%vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
@@ -2413,11 +2748,18 @@ define <4 x i16> @vmul_built_dup_fromsmall_test(<4 x i16> %a, <4 x i16> %b) {
}
define <8 x i16> @vmulq_built_dup_fromsmall_test(<8 x i16> %a, <4 x i16> %b) {
-; CHECK-LABEL: vmulq_built_dup_fromsmall_test:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mul.8h v0, v0, v1[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vmulq_built_dup_fromsmall_test:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: mul v0.8h, v0.8h, v1.h[0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vmulq_built_dup_fromsmall_test:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: dup v1.8h, v1.h[0]
+; CHECK-GI-NEXT: mul v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%vget_lane = extractelement <4 x i16> %b, i32 0
%vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
%vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
@@ -2434,7 +2776,7 @@ define <8 x i16> @vmulq_built_dup_fromsmall_test(<8 x i16> %a, <4 x i16> %b) {
define <2 x i64> @mull_from_two_extracts(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: mull_from_two_extracts:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull2.2d v0, v0, v1
+; CHECK-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s
; CHECK-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -2446,7 +2788,7 @@ define <2 x i64> @mull_from_two_extracts(<4 x i32> %lhs, <4 x i32> %rhs) {
define <2 x i64> @mlal_from_two_extracts(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: mlal_from_two_extracts:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.2d v0, v1, v2
+; CHECK-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s
; CHECK-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -2459,8 +2801,8 @@ define <2 x i64> @mlal_from_two_extracts(<2 x i64> %accum, <4 x i32> %lhs, <4 x
define <2 x i64> @mull_from_extract_dup_low(<4 x i32> %lhs, i32 %rhs) {
; CHECK-LABEL: mull_from_extract_dup_low:
; CHECK: // %bb.0:
-; CHECK-NEXT: dup.2s v1, w0
-; CHECK-NEXT: sqdmull.2d v0, v0, v1
+; CHECK-NEXT: dup v1.2s, w0
+; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.2s
; CHECK-NEXT: ret
%rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
%rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -2472,11 +2814,18 @@ define <2 x i64> @mull_from_extract_dup_low(<4 x i32> %lhs, i32 %rhs) {
}
define <2 x i64> @mull_from_extract_dup_high(<4 x i32> %lhs, i32 %rhs) {
-; CHECK-LABEL: mull_from_extract_dup_high:
-; CHECK: // %bb.0:
-; CHECK-NEXT: dup.4s v1, w0
-; CHECK-NEXT: sqdmull2.2d v0, v0, v1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mull_from_extract_dup_high:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v1.4s, w0
+; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mull_from_extract_dup_high:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v1.2s, w0
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.2s
+; CHECK-GI-NEXT: ret
%rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
%rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -2489,8 +2838,8 @@ define <2 x i64> @mull_from_extract_dup_high(<4 x i32> %lhs, i32 %rhs) {
define <8 x i16> @pmull_from_extract_dup_low(<16 x i8> %lhs, i8 %rhs) {
; CHECK-LABEL: pmull_from_extract_dup_low:
; CHECK: // %bb.0:
-; CHECK-NEXT: dup.8b v1, w0
-; CHECK-NEXT: pmull.8h v0, v0, v1
+; CHECK-NEXT: dup v1.8b, w0
+; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b
; CHECK-NEXT: ret
%rhsvec.0 = insertelement <8 x i8> undef, i8 %rhs, i32 0
%rhsvec = shufflevector <8 x i8> %rhsvec.0, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -2504,8 +2853,8 @@ define <8 x i16> @pmull_from_extract_dup_low(<16 x i8> %lhs, i8 %rhs) {
define <8 x i16> @pmull_from_extract_dup_high(<16 x i8> %lhs, i8 %rhs) {
; CHECK-LABEL: pmull_from_extract_dup_high:
; CHECK: // %bb.0:
-; CHECK-NEXT: dup.16b v1, w0
-; CHECK-NEXT: pmull2.8h v0, v0, v1
+; CHECK-NEXT: dup v1.16b, w0
+; CHECK-NEXT: pmull2 v0.8h, v0.16b, v1.16b
; CHECK-NEXT: ret
%rhsvec.0 = insertelement <8 x i8> undef, i8 %rhs, i32 0
%rhsvec = shufflevector <8 x i8> %rhsvec.0, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -2520,8 +2869,8 @@ define <8 x i16> @pmull_from_extract_duplane_low(<16 x i8> %lhs, <8 x i8> %rhs)
; CHECK-LABEL: pmull_from_extract_duplane_low:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: dup.8b v1, v1[0]
-; CHECK-NEXT: pmull.8h v0, v0, v1
+; CHECK-NEXT: dup v1.8b, v1.b[0]
+; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b
; CHECK-NEXT: ret
%lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%rhs.high = shufflevector <8 x i8> %rhs, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -2534,8 +2883,8 @@ define <8 x i16> @pmull_from_extract_duplane_high(<16 x i8> %lhs, <8 x i8> %rhs)
; CHECK-LABEL: pmull_from_extract_duplane_high:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: dup.16b v1, v1[0]
-; CHECK-NEXT: pmull2.8h v0, v0, v1
+; CHECK-NEXT: dup v1.16b, v1.b[0]
+; CHECK-NEXT: pmull2 v0.8h, v0.16b, v1.16b
; CHECK-NEXT: ret
%lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%rhs.high = shufflevector <8 x i8> %rhs, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -2547,7 +2896,7 @@ define <8 x i16> @pmull_from_extract_duplane_high(<16 x i8> %lhs, <8 x i8> %rhs)
define <2 x i64> @sqdmull_from_extract_duplane_low(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: sqdmull_from_extract_duplane_low:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull.2d v0, v0, v1[0]
+; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.s[0]
; CHECK-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2557,10 +2906,16 @@ define <2 x i64> @sqdmull_from_extract_duplane_low(<4 x i32> %lhs, <4 x i32> %rh
}
define <2 x i64> @sqdmull_from_extract_duplane_high(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK-LABEL: sqdmull_from_extract_duplane_high:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmull2.2d v0, v0, v1[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmull_from_extract_duplane_high:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.s[0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmull_from_extract_duplane_high:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.s[0]
+; CHECK-GI-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2571,7 +2926,7 @@ define <2 x i64> @sqdmull_from_extract_duplane_high(<4 x i32> %lhs, <4 x i32> %r
define <2 x i64> @sqdmlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: sqdmlal_from_extract_duplane_low:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal.2d v0, v1, v2[0]
+; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.s[0]
; CHECK-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2582,10 +2937,16 @@ define <2 x i64> @sqdmlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %
}
define <2 x i64> @sqdmlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK-LABEL: sqdmlal_from_extract_duplane_high:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.2d v0, v1, v2[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal_from_extract_duplane_high:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlal2 v0.2d, v1.4s, v2.s[0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal_from_extract_duplane_high:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: sqdmlal v0.2d, v1.2s, v2.s[0]
+; CHECK-GI-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2597,7 +2958,7 @@ define <2 x i64> @sqdmlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32>
define <2 x i64> @umlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: umlal_from_extract_duplane_low:
; CHECK: // %bb.0:
-; CHECK-NEXT: umlal.2d v0, v1, v2[0]
+; CHECK-NEXT: umlal v0.2d, v1.2s, v2.s[0]
; CHECK-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2608,10 +2969,16 @@ define <2 x i64> @umlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lh
}
define <2 x i64> @umlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK-LABEL: umlal_from_extract_duplane_high:
-; CHECK: // %bb.0:
-; CHECK-NEXT: umlal2.2d v0, v1, v2[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlal_from_extract_duplane_high:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: umlal2 v0.2d, v1.4s, v2.s[0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlal_from_extract_duplane_high:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: umlal v0.2d, v1.2s, v2.s[0]
+; CHECK-GI-NEXT: ret
%lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -2623,7 +2990,7 @@ define <2 x i64> @umlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %l
define float @scalar_fmla_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) {
; CHECK-LABEL: scalar_fmla_from_extract_v4f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmla.s s0, s1, v2[3]
+; CHECK-NEXT: fmla s0, s1, v2.s[3]
; CHECK-NEXT: ret
%rhs = extractelement <4 x float> %rvec, i32 3
%res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
@@ -2631,11 +2998,18 @@ define float @scalar_fmla_from_extract_v4f32(float %accum, float %lhs, <4 x floa
}
define float @scalar_fmla_from_extract_v2f32(float %accum, float %lhs, <2 x float> %rvec) {
-; CHECK-LABEL: scalar_fmla_from_extract_v2f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmla.s s0, s1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_fmla_from_extract_v2f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: fmla s0, s1, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_fmla_from_extract_v2f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: mov s2, v2.s[1]
+; CHECK-GI-NEXT: fmadd s0, s1, s2, s0
+; CHECK-GI-NEXT: ret
%rhs = extractelement <2 x float> %rvec, i32 1
%res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
ret float %res
@@ -2644,7 +3018,7 @@ define float @scalar_fmla_from_extract_v2f32(float %accum, float %lhs, <2 x floa
define float @scalar_fmls_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) {
; CHECK-LABEL: scalar_fmls_from_extract_v4f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmls.s s0, s1, v2[3]
+; CHECK-NEXT: fmls s0, s1, v2.s[3]
; CHECK-NEXT: ret
%rhs.scal = extractelement <4 x float> %rvec, i32 3
%rhs = fsub float -0.0, %rhs.scal
@@ -2656,7 +3030,7 @@ define float @scalar_fmls_from_extract_v2f32(float %accum, float %lhs, <2 x floa
; CHECK-LABEL: scalar_fmls_from_extract_v2f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmls.s s0, s1, v2[1]
+; CHECK-NEXT: fmls s0, s1, v2.s[1]
; CHECK-NEXT: ret
%rhs.scal = extractelement <2 x float> %rvec, i32 1
%rhs = fsub float -0.0, %rhs.scal
@@ -2669,7 +3043,7 @@ declare float @llvm.fma.f32(float, float, float)
define double @scalar_fmla_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) {
; CHECK-LABEL: scalar_fmla_from_extract_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmla.d d0, d1, v2[1]
+; CHECK-NEXT: fmla d0, d1, v2.d[1]
; CHECK-NEXT: ret
%rhs = extractelement <2 x double> %rvec, i32 1
%res = call double @llvm.fma.f64(double %lhs, double %rhs, double %accum)
@@ -2679,7 +3053,7 @@ define double @scalar_fmla_from_extract_v2f64(double %accum, double %lhs, <2 x d
define double @scalar_fmls_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) {
; CHECK-LABEL: scalar_fmls_from_extract_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmls.d d0, d1, v2[1]
+; CHECK-NEXT: fmls d0, d1, v2.d[1]
; CHECK-NEXT: ret
%rhs.scal = extractelement <2 x double> %rvec, i32 1
%rhs = fsub double -0.0, %rhs.scal
@@ -2692,7 +3066,7 @@ declare double @llvm.fma.f64(double, double, double)
define <2 x float> @fmls_with_fneg_before_extract_v2f32(<2 x float> %accum, <2 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmls.2s v0, v1, v2[3]
+; CHECK-NEXT: fmls v0.2s, v1.2s, v2.s[3]
; CHECK-NEXT: ret
%rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs
%splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <2 x i32> <i32 3, i32 3>
@@ -2704,7 +3078,7 @@ define <2 x float> @fmls_with_fneg_before_extract_v2f32_1(<2 x float> %accum, <2
; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32_1:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmls.2s v0, v1, v2[1]
+; CHECK-NEXT: fmls v0.2s, v1.2s, v2.s[1]
; CHECK-NEXT: ret
%rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs
%splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <2 x i32> <i32 1, i32 1>
@@ -2715,7 +3089,7 @@ define <2 x float> @fmls_with_fneg_before_extract_v2f32_1(<2 x float> %accum, <2
define <4 x float> @fmls_with_fneg_before_extract_v4f32(<4 x float> %accum, <4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmls.4s v0, v1, v2[3]
+; CHECK-NEXT: fmls v0.4s, v1.4s, v2.s[3]
; CHECK-NEXT: ret
%rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs
%splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
@@ -2727,7 +3101,7 @@ define <4 x float> @fmls_with_fneg_before_extract_v4f32_1(<4 x float> %accum, <4
; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32_1:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: fmls.4s v0, v1, v2[1]
+; CHECK-NEXT: fmls v0.4s, v1.4s, v2.s[1]
; CHECK-NEXT: ret
%rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs
%splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -2738,7 +3112,7 @@ define <4 x float> @fmls_with_fneg_before_extract_v4f32_1(<4 x float> %accum, <4
define <2 x double> @fmls_with_fneg_before_extract_v2f64(<2 x double> %accum, <2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: fmls_with_fneg_before_extract_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmls.2d v0, v1, v2[1]
+; CHECK-NEXT: fmls v0.2d, v1.2d, v2.d[1]
; CHECK-NEXT: ret
%rhs_neg = fsub <2 x double> <double -0.0, double -0.0>, %rhs
%splat = shufflevector <2 x double> %rhs_neg, <2 x double> undef, <2 x i32> <i32 1, i32 1>
@@ -2770,7 +3144,7 @@ define i32 @sqdmlal_s(i16 %A, i16 %B, i32 %C) nounwind {
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: fmov s2, w2
-; CHECK-NEXT: sqdmlal.h s2, h0, v1[0]
+; CHECK-NEXT: sqdmlal s2, h0, v1.h[0]
; CHECK-NEXT: fmov w0, s2
; CHECK-NEXT: ret
%tmp1 = insertelement <4 x i16> undef, i16 %A, i64 0
@@ -2801,7 +3175,7 @@ define i32 @sqdmlsl_s(i16 %A, i16 %B, i32 %C) nounwind {
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: fmov s1, w1
; CHECK-NEXT: fmov s2, w2
-; CHECK-NEXT: sqdmlsl.h s2, h0, v1[0]
+; CHECK-NEXT: sqdmlsl s2, h0, v1.h[0]
; CHECK-NEXT: fmov w0, s2
; CHECK-NEXT: ret
%tmp1 = insertelement <4 x i16> undef, i16 %A, i64 0
@@ -2831,7 +3205,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d0, x1
; CHECK-NEXT: fmov d1, x0
-; CHECK-NEXT: pmull.1q v0, v1, v0
+; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d
; CHECK-NEXT: ret
%val = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
ret <16 x i8> %val
@@ -2840,7 +3214,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind {
define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
; CHECK-LABEL: test_pmull_high_64:
; CHECK: // %bb.0:
-; CHECK-NEXT: pmull2.1q v0, v0, v1
+; CHECK-NEXT: pmull2 v0.1q, v0.2d, v1.2d
; CHECK-NEXT: ret
%l_hi = extractelement <2 x i64> %l, i32 1
%r_hi = extractelement <2 x i64> %r, i32 1
@@ -2851,15 +3225,23 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {
-; CHECK-LABEL: test_mul_v1i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: fmov x8, d1
-; CHECK-NEXT: fmov x9, d0
-; CHECK-NEXT: mul x8, x9, x8
-; CHECK-NEXT: fmov d0, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_mul_v1i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: fmov x8, d1
+; CHECK-SD-NEXT: fmov x9, d0
+; CHECK-SD-NEXT: mul x8, x9, x8
+; CHECK-SD-NEXT: fmov d0, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_mul_v1i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fmov x8, d0
+; CHECK-GI-NEXT: fmov x9, d1
+; CHECK-GI-NEXT: mul x8, x8, x9
+; CHECK-GI-NEXT: fmov d0, x8
+; CHECK-GI-NEXT: ret
%prod = mul <1 x i64> %lhs, %rhs
ret <1 x i64> %prod
}
@@ -2867,7 +3249,7 @@ define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {
define <4 x i32> @sqdmlal4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) {
; CHECK-LABEL: sqdmlal4s_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal.4s v0, v1, v2
+; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %v2)
%sum = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %dst, <4 x i32> %tmp)
@@ -2877,7 +3259,7 @@ define <4 x i32> @sqdmlal4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) {
define <2 x i64> @sqdmlal2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) {
; CHECK-LABEL: sqdmlal2d_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal.2d v0, v1, v2
+; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %v2)
%sum = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %dst, <2 x i64> %tmp)
@@ -2887,7 +3269,7 @@ define <2 x i64> @sqdmlal2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) {
define <4 x i32> @sqdmlal2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) {
; CHECK-LABEL: sqdmlal2_4s_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.4s v0, v1, v2
+; CHECK-NEXT: sqdmlal2 v0.4s, v1.8h, v2.8h
; CHECK-NEXT: ret
%tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -2899,7 +3281,7 @@ define <4 x i32> @sqdmlal2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2)
define <2 x i64> @sqdmlal2_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) {
; CHECK-LABEL: sqdmlal2_2d_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.2d v0, v1, v2
+; CHECK-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
%tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
@@ -2912,7 +3294,7 @@ define <4 x i32> @sqdmlal_lane_4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %
; CHECK-LABEL: sqdmlal_lane_4s_lib:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sqdmlal.4s v0, v1, v2[3]
+; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.h[3]
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i16> %v2, <4 x i16> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
%tmp1 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %tmp0)
@@ -2924,7 +3306,7 @@ define <2 x i64> @sqdmlal_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %
; CHECK-LABEL: sqdmlal_lane_2d_lib:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sqdmlal.2d v0, v1, v2[1]
+; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.s[1]
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x i32> %v2, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp1 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %tmp0)
@@ -2933,10 +3315,16 @@ define <2 x i64> @sqdmlal_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %
}
define <4 x i32> @sqdmlal2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) {
-; CHECK-LABEL: sqdmlal2_lane_4s_lib:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.4s v0, v1, v2[7]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_lane_4s_lib:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlal2 v0.4s, v1.8h, v2.h[7]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_lane_4s_lib:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: sqdmlal v0.4s, v1.4h, v2.h[7]
+; CHECK-GI-NEXT: ret
%tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
%tmp2 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp0, <4 x i16> %tmp1)
@@ -2945,10 +3333,16 @@ define <4 x i32> @sqdmlal2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16>
}
define <2 x i64> @sqdmlal2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) {
-; CHECK-LABEL: sqdmlal2_lane_2d_lib:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlal2.2d v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlal2_lane_2d_lib:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlal2 v0.2d, v1.4s, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlal2_lane_2d_lib:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: sqdmlal v0.2d, v1.2s, v2.s[1]
+; CHECK-GI-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
%tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp2 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp0, <2 x i32> %tmp1)
@@ -2959,7 +3353,7 @@ define <2 x i64> @sqdmlal2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32>
define <4 x i32> @sqdmlsl4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) {
; CHECK-LABEL: sqdmlsl4s_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl.4s v0, v1, v2
+; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %v2)
%sum = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %dst, <4 x i32> %tmp)
@@ -2969,7 +3363,7 @@ define <4 x i32> @sqdmlsl4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) {
define <2 x i64> @sqdmlsl2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) {
; CHECK-LABEL: sqdmlsl2d_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl.2d v0, v1, v2
+; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s
; CHECK-NEXT: ret
%tmp = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %v2)
%sum = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %dst, <2 x i64> %tmp)
@@ -2979,7 +3373,7 @@ define <2 x i64> @sqdmlsl2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) {
define <4 x i32> @sqdmlsl2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) {
; CHECK-LABEL: sqdmlsl2_4s_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.4s v0, v1, v2
+; CHECK-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.8h
; CHECK-NEXT: ret
%tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -2991,7 +3385,7 @@ define <4 x i32> @sqdmlsl2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2)
define <2 x i64> @sqdmlsl2_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) {
; CHECK-LABEL: sqdmlsl2_2d_lib:
; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.2d v0, v1, v2
+; CHECK-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.4s
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
%tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
@@ -3004,7 +3398,7 @@ define <4 x i32> @sqdmlsl_lane_4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %
; CHECK-LABEL: sqdmlsl_lane_4s_lib:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sqdmlsl.4s v0, v1, v2[3]
+; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.h[3]
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i16> %v2, <4 x i16> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
%tmp1 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %tmp0)
@@ -3016,7 +3410,7 @@ define <2 x i64> @sqdmlsl_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %
; CHECK-LABEL: sqdmlsl_lane_2d_lib:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sqdmlsl.2d v0, v1, v2[1]
+; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.s[1]
; CHECK-NEXT: ret
%tmp0 = shufflevector <2 x i32> %v2, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp1 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %tmp0)
@@ -3025,10 +3419,16 @@ define <2 x i64> @sqdmlsl_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %
}
define <4 x i32> @sqdmlsl2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) {
-; CHECK-LABEL: sqdmlsl2_lane_4s_lib:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.4s v0, v1, v2[7]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_lane_4s_lib:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.h[7]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_lane_4s_lib:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: sqdmlsl v0.4s, v1.4h, v2.h[7]
+; CHECK-GI-NEXT: ret
%tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
%tmp2 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp0, <4 x i16> %tmp1)
@@ -3037,10 +3437,16 @@ define <4 x i32> @sqdmlsl2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16>
}
define <2 x i64> @sqdmlsl2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) {
-; CHECK-LABEL: sqdmlsl2_lane_2d_lib:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sqdmlsl2.2d v0, v1, v2[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: sqdmlsl2_lane_2d_lib:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.s[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: sqdmlsl2_lane_2d_lib:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov d1, v1.d[1]
+; CHECK-GI-NEXT: sqdmlsl v0.2d, v1.2s, v2.s[1]
+; CHECK-GI-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
%tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 1, i32 1>
%tmp2 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp0, <2 x i32> %tmp1)
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 634d1b9..5f5b27a 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -59,37 +59,33 @@ bb27: ; preds = %bb9, %bb8
define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-LABEL: avoid_promotion_2_and:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: add x8, x0, #32
-; CHECK-NEXT: b LBB1_2
-; CHECK-NEXT: LBB1_1: ; %latch
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: cmp w9, #2
-; CHECK-NEXT: add x8, x8, #56
-; CHECK-NEXT: b.ls LBB1_4
-; CHECK-NEXT: LBB1_2: ; %loop
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, #32
+; CHECK-NEXT: LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr w9, [x8, #20]
-; CHECK-NEXT: cmp w9, #3
-; CHECK-NEXT: b.lo LBB1_1
-; CHECK-NEXT: ; %bb.3: ; %then
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: ldp w13, w12, [x8, #12]
-; CHECK-NEXT: ldr w10, [x8]
+; CHECK-NEXT: ldr w10, [x9, #20]
+; CHECK-NEXT: cmp w10, #3
+; CHECK-NEXT: b.lo LBB1_3
+; CHECK-NEXT: ; %bb.2: ; %then
+; CHECK-NEXT: ; in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT: ldp w13, w12, [x9, #12]
+; CHECK-NEXT: ldr w10, [x9]
; CHECK-NEXT: ldr x11, [x0]
-; CHECK-NEXT: ldr w14, [x8, #8]
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: ldr w14, [x9, #8]
; CHECK-NEXT: lsl w10, w10, w13
; CHECK-NEXT: ldrb w11, [x11, x12]
; CHECK-NEXT: eor w10, w10, w11
-; CHECK-NEXT: ldur w11, [x8, #-24]
+; CHECK-NEXT: ldur w11, [x9, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x14, x13, [x8, #-16]
-; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: ldp x14, x13, [x9, #-16]
+; CHECK-NEXT: str w10, [x9], #56
; CHECK-NEXT: and w11, w11, w12
; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
-; CHECK-NEXT: LBB1_4: ; %exit
+; CHECK-NEXT: LBB1_3: ; %exit.critedge
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..c4c54175
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+  ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..b47607e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..94b657c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index 5765e0a..b3ce9d2 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -1,14 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64"
define i1 @test_EQ_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp eq i64 %add, %a
@@ -16,11 +23,19 @@ entry:
}
define i1 @test_EQ_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -55,11 +70,19 @@ entry:
}
define i1 @test_EQ_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -68,11 +91,17 @@ entry:
}
define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp eq i32 %add, %a
@@ -218,11 +247,17 @@ entry:
}
define i1 @test_NE_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp ne i64 %add, %a
@@ -230,11 +265,19 @@ entry:
}
define i1 @test_NE_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -269,11 +312,19 @@ entry:
}
define i1 @test_NE_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -282,11 +333,17 @@ entry:
}
define i1 @test_NE_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp ne i32 %add, %a
@@ -444,161 +501,281 @@ define i1 @cmn_large_imm(i32 %a) {
}
define i1 @almost_immediate_neg_slt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_slt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i64 %x, -16773121
ret i1 %cmp
}
@@ -637,6 +814,24 @@ define i1 @cmn_nsw_neg(i32 %a, i32 %b) {
ret i1 %cmp
}
+define i1 @cmn_swap(i32 %a, i32 %b) {
+; CHECK-SD-LABEL: cmn_swap:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmn_swap:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
+ %sub = sub nsw i32 0, %b
+ %cmp = icmp sgt i32 %sub, %a
+ ret i1 %cmp
+}
+
+
define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) {
; CHECK-LABEL: cmn_nsw_neg_64:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/combine-and-like.ll b/llvm/test/CodeGen/AArch64/combine-and-like.ll
index 15770c2..ea1359b 100644
--- a/llvm/test/CodeGen/AArch64/combine-and-like.ll
+++ b/llvm/test/CodeGen/AArch64/combine-and-like.ll
@@ -4,7 +4,6 @@
define i32 @f(i32 %a0) {
; CHECK-LABEL: f:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
%1 = lshr i32 %a0, 2147483647
%2 = add i32 %1, 2147483647
diff --git a/llvm/test/CodeGen/AArch64/constant-pool-partition.ll b/llvm/test/CodeGen/AArch64/constant-pool-partition.ll
index d444713..9f4b3e2 100644
--- a/llvm/test/CodeGen/AArch64/constant-pool-partition.ll
+++ b/llvm/test/CodeGen/AArch64/constant-pool-partition.ll
@@ -19,11 +19,11 @@
; function, constant pools for this constant should not have `.unlikely` suffix.
;; Constant pools for function @cold_func.
-; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8
+; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI0_0:
; CHECK-NEXT: .xword 0x3fe5c28f5c28f5c3 // double 0.68000000000000005
-; CHECK-NEXT: .section .rodata.cst8.unlikely,"aM",@progbits,8
+; CHECK-NEXT: .section .rodata.cst8.unlikely.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI0_1:
; CHECK-NEXT: .xword 0x3fe5eb851eb851ec // double 0.68500000000000005
@@ -58,7 +58,7 @@
; CHECK-NEXT: .word 3 // 0x3
; CHECK-NEXT: .word 5 // 0x5
; CHECK-NEXT: .word 7 // 0x7
-; CHECK-NEXT: .section .rodata.cst16.hot,"aM",@progbits,16
+; CHECK-NEXT: .section .rodata.cst16.hot.,"aM",@progbits,16
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI1_2:
; CHECK-NEXT: .word 442 // 0x1ba
@@ -67,11 +67,11 @@
; CHECK-NEXT: .word 0 // 0x0
;; Constant pools for function @hot_func
-; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8
+; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI2_0:
; CHECK-NEXT: .xword 0x3fe5c28f5c28f5c3 // double 0.68000000000000005
-; CHECK-NEXT: .section .rodata.cst16.hot,"aM",@progbits,16
+; CHECK-NEXT: .section .rodata.cst16.hot.,"aM",@progbits,16
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI2_1:
; CHECK-NEXT: .word 0 // 0x0
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
index 0f208f8..374def5 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=false < %s | sed -e "/; kill: /d" | FileCheck %s
+; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=true < %s | FileCheck %s
; Check there's no assert in spilling from implicit-def operands on an
; IMPLICIT_DEF.
@@ -92,7 +93,6 @@ define void @widget(i32 %arg, i32 %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i32 %a
; CHECK-NEXT: ldr x8, [sp, #40] ; 8-byte Folded Reload
; CHECK-NEXT: mov x0, xzr
; CHECK-NEXT: mov x1, xzr
-; CHECK-NEXT: ; kill: def $w8 killed $w8 killed $x8 def $x8
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl _fprintf
; CHECK-NEXT: brk #0x1
diff --git a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
index c4a027c..381904f 100644
--- a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
+++ b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
@@ -25,77 +25,58 @@ define void @test_interp(ptr %frame, ptr %dst) {
; CHECK-NEXT: adrp x21, _opcode.targets@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov x24, xzr
; CHECK-NEXT: add x8, x21, xzr, lsl #3
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: add x23, x22, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: mov w22, #1 ; =0x1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp0: ; Block address taken
; CHECK-NEXT: LBB0_1: ; %loop.header
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: mov x22, xzr
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp1: ; Block address taken
; CHECK-NEXT: LBB0_2: ; %op1.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: str xzr, [x19]
-; CHECK-NEXT: mov w8, #1 ; =0x1
+; CHECK-NEXT: Ltmp2: ; Block address taken
+; CHECK-NEXT: LBB0_3: ; %op6.bb
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
+; CHECK-NEXT: ldr x8, [x0, #8]
+; CHECK-NEXT: str x22, [x0]
+; CHECK-NEXT: ldr x8, [x8, #48]
; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x8, x21, x24, lsl #3
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp2: ; Block address taken
-; CHECK-NEXT: LBB0_3: ; %op2.bb
+; CHECK-NEXT: Ltmp3: ; Block address taken
+; CHECK-NEXT: LBB0_4: ; %op2.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp3: ; Block address taken
-; CHECK-NEXT: LBB0_4: ; %op4.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
-; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
-; CHECK-NEXT: stp x8, x9, [x20, #-8]
-; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
; CHECK-NEXT: Ltmp4: ; Block address taken
-; CHECK-NEXT: LBB0_5: ; %op5.bb
+; CHECK-NEXT: LBB0_5: ; %op4.bb
+; CHECK-NEXT: Ltmp5: ; Block address taken
+; CHECK-NEXT: LBB0_6: ; %op5.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: ldur x8, [x23, #12]
; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
+; CHECK-NEXT: add x23, x23, #20
; CHECK-NEXT: stp x8, x9, [x20, #-8]
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
-; CHECK-NEXT: Ltmp5: ; Block address taken
-; CHECK-NEXT: LBB0_6: ; %op6.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: mov w8, #1 ; =0x1
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
-; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index 9912c7a..81f13b8 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
@var1_32 = global i32 0
@var2_32 = global i32 0
@@ -243,26 +244,48 @@ define void @logical_64bit() minsize {
}
define void @flag_setting() {
-; CHECK-LABEL: flag_setting:
-; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, :got:var1_64
-; CHECK-NEXT: adrp x10, :got:var2_64
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:var1_64]
-; CHECK-NEXT: ldr x10, [x10, :got_lo12:var2_64]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: ldr x10, [x10]
-; CHECK-NEXT: tst x9, x10
-; CHECK-NEXT: b.gt .LBB2_4
-; CHECK-NEXT: // %bb.1: // %test2
-; CHECK-NEXT: tst x9, x10, lsl #63
-; CHECK-NEXT: b.lt .LBB2_4
-; CHECK-NEXT: // %bb.2: // %test3
-; CHECK-NEXT: tst x9, x10, asr #12
-; CHECK-NEXT: b.gt .LBB2_4
-; CHECK-NEXT: // %bb.3: // %other_exit
-; CHECK-NEXT: str x9, [x8]
-; CHECK-NEXT: .LBB2_4: // %common.ret
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: flag_setting:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, :got:var1_64
+; CHECK-SD-NEXT: adrp x10, :got:var2_64
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:var1_64]
+; CHECK-SD-NEXT: ldr x10, [x10, :got_lo12:var2_64]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: ldr x10, [x10]
+; CHECK-SD-NEXT: tst x9, x10
+; CHECK-SD-NEXT: b.gt .LBB2_4
+; CHECK-SD-NEXT: // %bb.1: // %test2
+; CHECK-SD-NEXT: tst x9, x10, lsl #63
+; CHECK-SD-NEXT: b.lt .LBB2_4
+; CHECK-SD-NEXT: // %bb.2: // %test3
+; CHECK-SD-NEXT: tst x9, x10, asr #12
+; CHECK-SD-NEXT: b.gt .LBB2_4
+; CHECK-SD-NEXT: // %bb.3: // %other_exit
+; CHECK-SD-NEXT: str x9, [x8]
+; CHECK-SD-NEXT: .LBB2_4: // %common.ret
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: flag_setting:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, :got:var1_64
+; CHECK-GI-NEXT: adrp x10, :got:var2_64
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:var1_64]
+; CHECK-GI-NEXT: ldr x10, [x10, :got_lo12:var2_64]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: ldr x10, [x10]
+; CHECK-GI-NEXT: tst x9, x10
+; CHECK-GI-NEXT: b.gt .LBB2_4
+; CHECK-GI-NEXT: // %bb.1: // %test2
+; CHECK-GI-NEXT: tst x9, x10, lsl #63
+; CHECK-GI-NEXT: b.lt .LBB2_4
+; CHECK-GI-NEXT: // %bb.2: // %test3
+; CHECK-GI-NEXT: asr x10, x10, #12
+; CHECK-GI-NEXT: tst x10, x9
+; CHECK-GI-NEXT: b.gt .LBB2_4
+; CHECK-GI-NEXT: // %bb.3: // %other_exit
+; CHECK-GI-NEXT: str x9, [x8]
+; CHECK-GI-NEXT: .LBB2_4: // %common.ret
+; CHECK-GI-NEXT: ret
%val1 = load i64, ptr @var1_64
%val2 = load i64, ptr @var2_64
diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll
index bbdce7c..15c1dff 100644
--- a/llvm/test/CodeGen/AArch64/midpoint-int.ll
+++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll
@@ -13,10 +13,9 @@
define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: scalar_i32_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w9, w1, w0
-; CHECK-NEXT: subs w10, w0, w1
+; CHECK-NEXT: subs w9, w0, w1
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: csel w9, w10, w9, gt
+; CHECK-NEXT: cneg w9, w9, le
; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
@@ -35,10 +34,9 @@ define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: scalar_i32_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub w9, w1, w0
-; CHECK-NEXT: subs w10, w0, w1
+; CHECK-NEXT: subs w9, w0, w1
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: csel w9, w10, w9, hi
+; CHECK-NEXT: cneg w9, w9, ls
; CHECK-NEXT: cneg w8, w8, ls
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
@@ -61,11 +59,9 @@ define i32 @scalar_i32_signed_mem_reg(ptr %a1_addr, i32 %a2) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w9, w1
-; CHECK-NEXT: sub w10, w1, w9
+; CHECK-NEXT: subs w10, w9, w1
+; CHECK-NEXT: cneg w10, w10, le
; CHECK-NEXT: cneg w8, w8, le
-; CHECK-NEXT: subs w11, w9, w1
-; CHECK-NEXT: csel w10, w11, w10, gt
; CHECK-NEXT: lsr w10, w10, #1
; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
@@ -86,11 +82,9 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, ptr %a2_addr) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x1]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: sub w10, w9, w0
-; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: subs w9, w0, w9
-; CHECK-NEXT: csel w9, w9, w10, gt
+; CHECK-NEXT: cneg w9, w9, le
+; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -112,11 +106,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: ldr w10, [x1]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: cmp w9, w10
-; CHECK-NEXT: sub w11, w10, w9
-; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: subs w10, w9, w10
-; CHECK-NEXT: csel w10, w10, w11, gt
+; CHECK-NEXT: cneg w10, w10, le
+; CHECK-NEXT: cneg w8, w8, le
; CHECK-NEXT: lsr w10, w10, #1
; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
@@ -142,10 +134,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
; CHECK-LABEL: scalar_i64_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x9, x1, x0
-; CHECK-NEXT: subs x10, x0, x1
+; CHECK-NEXT: subs x9, x0, x1
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: csel x9, x10, x9, gt
+; CHECK-NEXT: cneg x9, x9, le
; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
@@ -164,10 +155,9 @@ define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
; CHECK-LABEL: scalar_i64_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub x9, x1, x0
-; CHECK-NEXT: subs x10, x0, x1
+; CHECK-NEXT: subs x9, x0, x1
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: csel x9, x10, x9, hi
+; CHECK-NEXT: cneg x9, x9, ls
; CHECK-NEXT: cneg x8, x8, ls
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
@@ -190,11 +180,9 @@ define i64 @scalar_i64_signed_mem_reg(ptr %a1_addr, i64 %a2) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x9, x1
-; CHECK-NEXT: sub x10, x1, x9
+; CHECK-NEXT: subs x10, x9, x1
+; CHECK-NEXT: cneg x10, x10, le
; CHECK-NEXT: cneg x8, x8, le
-; CHECK-NEXT: subs x11, x9, x1
-; CHECK-NEXT: csel x10, x11, x10, gt
; CHECK-NEXT: lsr x10, x10, #1
; CHECK-NEXT: madd x0, x10, x8, x9
; CHECK-NEXT: ret
@@ -215,11 +203,9 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, ptr %a2_addr) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x1]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x0, x9
-; CHECK-NEXT: sub x10, x9, x0
-; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: subs x9, x0, x9
-; CHECK-NEXT: csel x9, x9, x10, gt
+; CHECK-NEXT: cneg x9, x9, le
+; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x9, x9, #1
; CHECK-NEXT: madd x0, x9, x8, x0
; CHECK-NEXT: ret
@@ -241,11 +227,9 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: ldr x10, [x1]
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: cmp x9, x10
-; CHECK-NEXT: sub x11, x10, x9
-; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: subs x10, x9, x10
-; CHECK-NEXT: csel x10, x10, x11, gt
+; CHECK-NEXT: cneg x10, x10, le
+; CHECK-NEXT: cneg x8, x8, le
; CHECK-NEXT: lsr x10, x10, #1
; CHECK-NEXT: madd x0, x10, x8, x9
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll
index 9be0d1a..35cafe5 100644
--- a/llvm/test/CodeGen/AArch64/neg-abs.ll
+++ b/llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -1,15 +1,22 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs \
-; RUN: -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare i64 @llvm.abs.i64(i64, i1 immarg)
define i64 @neg_abs64(i64 %x) {
-; CHECK-LABEL: neg_abs64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: cneg x0, x0, pl
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_abs64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: cneg x0, x0, pl
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_abs64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cneg x8, x0, le
+; CHECK-GI-NEXT: neg x0, x8
+; CHECK-GI-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
%neg = sub nsw i64 0, %abs
ret i64 %neg
@@ -18,11 +25,18 @@ define i64 @neg_abs64(i64 %x) {
declare i32 @llvm.abs.i32(i32, i1 immarg)
define i32 @neg_abs32(i32 %x) {
-; CHECK-LABEL: neg_abs32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: cneg w0, w0, pl
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_abs32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: cneg w0, w0, pl
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_abs32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cneg w8, w0, le
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
%neg = sub nsw i32 0, %abs
ret i32 %neg
@@ -31,12 +45,20 @@ define i32 @neg_abs32(i32 %x) {
declare i16 @llvm.abs.i16(i16, i1 immarg)
define i16 @neg_abs16(i16 %x) {
-; CHECK-LABEL: neg_abs16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sbfx w8, w0, #15, #1
-; CHECK-NEXT: eor w9, w0, w8
-; CHECK-NEXT: sub w0, w8, w9
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_abs16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sbfx w8, w0, #15, #1
+; CHECK-SD-NEXT: eor w9, w0, w8
+; CHECK-SD-NEXT: sub w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_abs16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sxth w8, w0
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w8, w0, le
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
%neg = sub nsw i16 0, %abs
ret i16 %neg
@@ -46,14 +68,25 @@ define i16 @neg_abs16(i16 %x) {
declare i128 @llvm.abs.i128(i128, i1 immarg)
define i128 @neg_abs128(i128 %x) {
-; CHECK-LABEL: neg_abs128:
-; CHECK: // %bb.0:
-; CHECK-NEXT: asr x8, x1, #63
-; CHECK-NEXT: eor x9, x0, x8
-; CHECK-NEXT: eor x10, x1, x8
-; CHECK-NEXT: subs x0, x8, x9
-; CHECK-NEXT: sbc x1, x8, x10
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_abs128:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: asr x8, x1, #63
+; CHECK-SD-NEXT: eor x9, x0, x8
+; CHECK-SD-NEXT: eor x10, x1, x8
+; CHECK-SD-NEXT: subs x0, x8, x9
+; CHECK-SD-NEXT: sbc x1, x8, x10
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_abs128:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: asr x8, x1, #63
+; CHECK-GI-NEXT: adds x9, x0, x8
+; CHECK-GI-NEXT: adc x10, x1, x8
+; CHECK-GI-NEXT: eor x9, x9, x8
+; CHECK-GI-NEXT: eor x8, x10, x8
+; CHECK-GI-NEXT: negs x0, x9
+; CHECK-GI-NEXT: ngc x1, x8
+; CHECK-GI-NEXT: ret
%abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
%neg = sub nsw i128 0, %abs
ret i128 %neg
@@ -62,46 +95,76 @@ define i128 @neg_abs128(i128 %x) {
define i64 @abs64(i64 %x) {
-; CHECK-LABEL: abs64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, #0
-; CHECK-NEXT: cneg x0, x0, mi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: abs64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: cneg x0, x0, mi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: abs64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cneg x0, x0, le
+; CHECK-GI-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
ret i64 %abs
}
define i32 @abs32(i32 %x) {
-; CHECK-LABEL: abs32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: cneg w0, w0, mi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: abs32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: cneg w0, w0, mi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: abs32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
+; CHECK-GI-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
ret i32 %abs
}
define i16 @abs16(i16 %x) {
-; CHECK-LABEL: abs16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: cneg w0, w8, mi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: abs16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxth w8, w0
+; CHECK-SD-NEXT: cmp w8, #0
+; CHECK-SD-NEXT: cneg w0, w8, mi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: abs16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sxth w8, w0
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
+; CHECK-GI-NEXT: ret
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
ret i16 %abs
}
define i128 @abs128(i128 %x) {
-; CHECK-LABEL: abs128:
-; CHECK: // %bb.0:
-; CHECK-NEXT: asr x8, x1, #63
-; CHECK-NEXT: eor x9, x0, x8
-; CHECK-NEXT: eor x10, x1, x8
-; CHECK-NEXT: subs x0, x9, x8
-; CHECK-NEXT: sbc x1, x10, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: abs128:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: asr x8, x1, #63
+; CHECK-SD-NEXT: eor x9, x0, x8
+; CHECK-SD-NEXT: eor x10, x1, x8
+; CHECK-SD-NEXT: subs x0, x9, x8
+; CHECK-SD-NEXT: sbc x1, x10, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: abs128:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: asr x8, x1, #63
+; CHECK-GI-NEXT: adds x9, x0, x8
+; CHECK-GI-NEXT: adc x10, x1, x8
+; CHECK-GI-NEXT: eor x0, x9, x8
+; CHECK-GI-NEXT: eor x1, x10, x8
+; CHECK-GI-NEXT: ret
%abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
ret i128 %abs
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/neg-selects.ll b/llvm/test/CodeGen/AArch64/neg-selects.ll
index 4ef1633..b643ee7 100644
--- a/llvm/test/CodeGen/AArch64/neg-selects.ll
+++ b/llvm/test/CodeGen/AArch64/neg-selects.ll
@@ -1,12 +1,22 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-elf %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i32 @neg_select_neg(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: neg_select_neg:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: csel w0, w0, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_select_neg:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: csel w0, w0, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_select_neg:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: neg w9, w0
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csneg w8, w9, w1, ne
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%nega = sub i32 0, %a
%negb = sub i32 0, %b
%sel = select i1 %bb, i32 %nega, i32 %negb
@@ -15,11 +25,20 @@ define i32 @neg_select_neg(i32 %a, i32 %b, i1 %bb) {
}
define i32 @negneg_select_nega(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: negneg_select_nega:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: csneg w0, w1, w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: negneg_select_nega:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: csneg w0, w1, w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: negneg_select_nega:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csneg w8, w1, w0, eq
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%nega = sub i32 0, %a
%sel = select i1 %bb, i32 %nega, i32 %b
%nsel = sub i32 0, %sel
@@ -28,11 +47,19 @@ define i32 @negneg_select_nega(i32 %a, i32 %b, i1 %bb) {
}
define i32 @neg_select_nega(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: neg_select_nega:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: csneg w0, w0, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_select_nega:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: csneg w0, w0, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_select_nega:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csneg w8, w1, w0, eq
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%nega = sub i32 0, %a
%sel = select i1 %bb, i32 %nega, i32 %b
%res = sub i32 0, %sel
@@ -40,11 +67,19 @@ define i32 @neg_select_nega(i32 %a, i32 %b, i1 %bb) {
}
define i32 @neg_select_negb(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: neg_select_negb:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: csneg w0, w1, w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_select_negb:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: csneg w0, w1, w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_select_negb:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csneg w8, w0, w1, ne
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%negb = sub i32 0, %b
%sel = select i1 %bb, i32 %a, i32 %negb
%res = sub i32 0, %sel
@@ -52,28 +87,47 @@ define i32 @neg_select_negb(i32 %a, i32 %b, i1 %bb) {
}
define i32 @neg_select_ab(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: neg_select_ab:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: csel w8, w0, w1, ne
-; CHECK-NEXT: neg w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_select_ab:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: csel w8, w0, w1, ne
+; CHECK-SD-NEXT: neg w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_select_ab:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csel w8, w0, w1, ne
+; CHECK-GI-NEXT: neg w0, w8
+; CHECK-GI-NEXT: ret
%sel = select i1 %bb, i32 %a, i32 %b
%res = sub i32 0, %sel
ret i32 %res
}
define i32 @neg_select_nega_with_use(i32 %a, i32 %b, i1 %bb) {
-; CHECK-LABEL: neg_select_nega_with_use:
-; CHECK: // %bb.0:
-; CHECK-NEXT: tst w2, #0x1
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: csneg w9, w1, w0, eq
-; CHECK-NEXT: sub w0, w8, w9
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: neg_select_nega_with_use:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w2, #0x1
+; CHECK-SD-NEXT: neg w8, w0
+; CHECK-SD-NEXT: csneg w9, w1, w0, eq
+; CHECK-SD-NEXT: sub w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: neg_select_nega_with_use:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w2, #0x1
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: neg w8, w0
+; CHECK-GI-NEXT: csneg w9, w1, w0, eq
+; CHECK-GI-NEXT: sub w0, w8, w9
+; CHECK-GI-NEXT: ret
%nega = sub i32 0, %a
%sel = select i1 %bb, i32 %nega, i32 %b
%nsel = sub i32 0, %sel
%res = add i32 %nsel, %nega
ret i32 %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/neon-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
index cf09a46..584caa30 100644
--- a/llvm/test/CodeGen/AArch64/neon-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
@@ -1,13 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65 < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65ae < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-e1 < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-n1 < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-n2 < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1 < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1a < %s | FileCheck %s
-; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1b < %s | FileCheck %s
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
declare <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>)
@@ -56,10 +49,17 @@ entry:
define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
-; CHECK-LABEL: test_vdot_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: udot v0.2s, v1.8b, v2.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.8b
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
%ret = add <2 x i32> %vdot1.i, %a
@@ -67,10 +67,17 @@ entry:
}
define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
-; CHECK-LABEL: test_vdotq_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: udot v0.4s, v1.16b, v2.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.16b
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
%ret = add <4 x i32> %vdot1.i, %a
@@ -78,10 +85,17 @@ entry:
}
define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
-; CHECK-LABEL: test_vdot_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sdot v0.2s, v1.8b, v2.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.8b
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
%ret = add <2 x i32> %vdot1.i, %a
@@ -89,10 +103,17 @@ entry:
}
define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
-; CHECK-LABEL: test_vdotq_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sdot v0.4s, v1.16b, v2.16b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.16b
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
%ret = add <4 x i32> %vdot1.i, %a
@@ -156,11 +177,19 @@ entry:
define <2 x i32> @test_vdot_lane_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK-LABEL: test_vdot_lane_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_lane_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_lane_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -171,11 +200,19 @@ entry:
}
define <4 x i32> @test_vdotq_lane_u32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
-; CHECK-LABEL: test_vdotq_lane_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_lane_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_lane_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -186,10 +223,17 @@ entry:
}
define <2 x i32> @test_vdot_laneq_u32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
-; CHECK-LABEL: test_vdot_laneq_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_laneq_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_laneq_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
%shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -200,10 +244,17 @@ entry:
}
define <4 x i32> @test_vdotq_laneq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK-LABEL: test_vdotq_laneq_u32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_laneq_u32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_laneq_u32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
%shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -270,11 +321,19 @@ entry:
define <2 x i32> @test_vdot_lane_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK-LABEL: test_vdot_lane_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_lane_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_lane_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -285,11 +344,19 @@ entry:
}
define <4 x i32> @test_vdotq_lane_s32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
-; CHECK-LABEL: test_vdotq_lane_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_lane_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_lane_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -300,10 +367,17 @@ entry:
}
define <2 x i32> @test_vdot_laneq_s32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
-; CHECK-LABEL: test_vdot_laneq_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdot_laneq_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdot_laneq_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
%shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -314,10 +388,17 @@ entry:
}
define <4 x i32> @test_vdotq_laneq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK-LABEL: test_vdotq_laneq_s32_zero:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_vdotq_laneq_s32_zero:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_vdotq_laneq_s32_zero:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: movi v3.2d, #0000000000000000
+; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.4b[1]
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
%shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -326,3 +407,6 @@ entry:
%ret = add <4 x i32> %vdot1.i, %a
ret <4 x i32> %ret
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 4f0c408..048e988 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -28,46 +28,28 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_udot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: uxtb w11, w11
-; CHECK-GI-NEXT: uxtb w13, w13
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: uaddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -128,46 +110,28 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_sdot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: sshll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: sshll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: sshll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -205,22 +169,18 @@ define i32 @test_sdot_v4i8_double(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8
;
; CHECK-GI-LABEL: test_sdot_v4i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: shl v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: shl v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: shl v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: shl v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: sshr v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: mul v1.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
-; CHECK-GI-NEXT: addv s1, v1.4s
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: shl v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: shl v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: shl v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: sshr v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mul v1.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
+; CHECK-GI-NEXT: saddlv s1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -414,31 +374,60 @@ define i32 @test_udot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: umov w8, v1.b[4]
-; CHECK-GI-NEXT: umov w9, v0.b[4]
-; CHECK-GI-NEXT: umov w10, v1.b[0]
-; CHECK-GI-NEXT: umov w12, v0.b[0]
-; CHECK-GI-NEXT: umov w11, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: umov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: umov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: umov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: umov w8, v0.h[0]
+; CHECK-GI-NEXT: umov w9, v0.h[4]
+; CHECK-GI-NEXT: umov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: umov w8, v0.h[2]
+; CHECK-GI-NEXT: umov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -511,31 +500,60 @@ define i32 @test_sdot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: smov w8, v1.b[4]
-; CHECK-GI-NEXT: smov w9, v0.b[4]
-; CHECK-GI-NEXT: smov w10, v1.b[0]
-; CHECK-GI-NEXT: smov w12, v0.b[0]
-; CHECK-GI-NEXT: smov w11, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: smov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: smov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: smov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -571,59 +589,117 @@ define i32 @test_sdot_v5i8_double(<5 x i8> %a, <5 x i8> %b, <5 x i8> %c, <5 x i8
; CHECK-GI-LABEL: test_sdot_v5i8_double:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov b17, v0.b[1]
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: smov w9, v1.b[0]
-; CHECK-GI-NEXT: smov w10, v0.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[4]
-; CHECK-GI-NEXT: smov w12, v2.b[0]
-; CHECK-GI-NEXT: smov w13, v2.b[4]
-; CHECK-GI-NEXT: smov w14, v3.b[4]
-; CHECK-GI-NEXT: smov w8, v0.b[0]
-; CHECK-GI-NEXT: smov w16, v3.b[0]
-; CHECK-GI-NEXT: smov w15, v0.b[1]
-; CHECK-GI-NEXT: fmov s5, w9
-; CHECK-GI-NEXT: mul w9, w10, w11
-; CHECK-GI-NEXT: smov w10, v1.b[1]
-; CHECK-GI-NEXT: fmov s6, w12
-; CHECK-GI-NEXT: mul w12, w13, w14
-; CHECK-GI-NEXT: smov w11, v2.b[1]
-; CHECK-GI-NEXT: smov w13, v3.b[1]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: fmov s7, w16
-; CHECK-GI-NEXT: fmov s16, w9
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: smov w14, v1.b[2]
-; CHECK-GI-NEXT: fmov s17, w12
-; CHECK-GI-NEXT: smov w9, v3.b[2]
-; CHECK-GI-NEXT: mov v5.s[1], w10
-; CHECK-GI-NEXT: mov v4.s[1], w15
-; CHECK-GI-NEXT: smov w15, v2.b[2]
-; CHECK-GI-NEXT: mov v6.s[1], w11
-; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w13
-; CHECK-GI-NEXT: smov w10, v0.b[3]
-; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: smov w11, v1.b[3]
-; CHECK-GI-NEXT: smov w12, v2.b[3]
-; CHECK-GI-NEXT: smov w13, v3.b[3]
-; CHECK-GI-NEXT: mov v5.s[2], w14
-; CHECK-GI-NEXT: mov v4.s[2], w8
-; CHECK-GI-NEXT: mov v6.s[2], w15
-; CHECK-GI-NEXT: mov v16.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], w9
-; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], w10
-; CHECK-GI-NEXT: mov v6.s[3], w12
-; CHECK-GI-NEXT: mov v16.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], w13
-; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: mla v16.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mla v17.4s, v6.4s, v7.4s
-; CHECK-GI-NEXT: addv s0, v16.4s
-; CHECK-GI-NEXT: addv s1, v17.4s
+; CHECK-GI-NEXT: fmov w11, s1
+; CHECK-GI-NEXT: mov b25, v1.b[1]
+; CHECK-GI-NEXT: mov b16, v1.b[2]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b5, v1.b[4]
+; CHECK-GI-NEXT: mov b22, v2.b[1]
+; CHECK-GI-NEXT: mov b23, v3.b[1]
+; CHECK-GI-NEXT: sxtb w9, w8
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b24, v0.b[2]
+; CHECK-GI-NEXT: fmov w8, s17
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b4, v0.b[4]
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b18, v2.b[2]
+; CHECK-GI-NEXT: mov b19, v2.b[3]
+; CHECK-GI-NEXT: mov b0, v2.b[4]
+; CHECK-GI-NEXT: fmov w9, s25
+; CHECK-GI-NEXT: fmov w12, s22
+; CHECK-GI-NEXT: sxtb w10, w8
+; CHECK-GI-NEXT: mov b21, v3.b[2]
+; CHECK-GI-NEXT: fmov w13, s23
+; CHECK-GI-NEXT: mov b20, v3.b[3]
+; CHECK-GI-NEXT: mov b17, v3.b[4]
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: mov v1.h[1], w10
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov s2, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v1.h[2], w8
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov s3, w10
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s22, w11
+; CHECK-GI-NEXT: fmov w11, s21
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w12
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: mov v22.h[1], w13
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: fmov w12, s6
+; CHECK-GI-NEXT: mov v3.h[2], w10
+; CHECK-GI-NEXT: fmov w10, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: fmov w11, s4
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v1.h[3], w12
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v1.h[4], w11
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mov v22.h[4], w10
+; CHECK-GI-NEXT: mul v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: mul v1.8h, v3.8h, v22.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w11, v0.h[1]
+; CHECK-GI-NEXT: smov w10, v1.h[0]
+; CHECK-GI-NEXT: smov w12, v1.h[4]
+; CHECK-GI-NEXT: smov w13, v1.h[1]
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v1.h[2]
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: fmov s5, w12
+; CHECK-GI-NEXT: mov v2.s[1], w11
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: smov w10, v0.h[3]
+; CHECK-GI-NEXT: smov w11, v1.h[3]
+; CHECK-GI-NEXT: mov v4.s[1], w13
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[2], w8
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v4.s[2], w9
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w10
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v4.s[3], w11
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -2303,11 +2379,14 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2316,132 +2395,282 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: umov w9, v1.b[4]
-; CHECK-GI-NEXT: umov w11, v1.b[5]
-; CHECK-GI-NEXT: umov w18, v1.b[0]
-; CHECK-GI-NEXT: umov w0, v1.b[12]
-; CHECK-GI-NEXT: umov w3, v7.b[4]
-; CHECK-GI-NEXT: umov w12, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v1.b[6]
-; CHECK-GI-NEXT: umov w1, v1.b[13]
-; CHECK-GI-NEXT: umov w4, v7.b[5]
-; CHECK-GI-NEXT: umov w15, v1.b[2]
-; CHECK-GI-NEXT: umov w8, v1.b[3]
-; CHECK-GI-NEXT: umov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: umov w14, v1.b[8]
-; CHECK-GI-NEXT: umov w17, v1.b[9]
-; CHECK-GI-NEXT: umov w10, v1.b[10]
-; CHECK-GI-NEXT: umov w9, v1.b[11]
-; CHECK-GI-NEXT: umov w5, v1.b[14]
-; CHECK-GI-NEXT: umov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: umov w7, v7.b[1]
-; CHECK-GI-NEXT: umov w18, v7.b[6]
-; CHECK-GI-NEXT: umov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: umov w12, v7.b[3]
-; CHECK-GI-NEXT: umov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: umov w13, v7.b[2]
-; CHECK-GI-NEXT: umov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: umov w23, v16.b[12]
-; CHECK-GI-NEXT: umov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: umov w6, v16.b[0]
-; CHECK-GI-NEXT: umov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: umov w16, v16.b[2]
-; CHECK-GI-NEXT: umov w15, v16.b[3]
-; CHECK-GI-NEXT: umov w22, v16.b[5]
-; CHECK-GI-NEXT: umov w5, v16.b[6]
-; CHECK-GI-NEXT: umov w18, v16.b[7]
-; CHECK-GI-NEXT: umov w19, v16.b[8]
-; CHECK-GI-NEXT: umov w7, v16.b[9]
-; CHECK-GI-NEXT: umov w24, v16.b[13]
-; CHECK-GI-NEXT: umov w1, v16.b[10]
-; CHECK-GI-NEXT: umov w17, v16.b[11]
-; CHECK-GI-NEXT: umov w20, v16.b[14]
-; CHECK-GI-NEXT: umov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: umov w21, v3.b[8]
-; CHECK-GI-NEXT: umov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: umov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: umov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: umov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: umov w24, v3.b[2]
-; CHECK-GI-NEXT: umov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: umov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b4, v2.b[1]
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[10]
+; CHECK-GI-NEXT: mov b22, v2.b[11]
+; CHECK-GI-NEXT: fmov w7, s2
+; CHECK-GI-NEXT: fmov w13, s6
+; CHECK-GI-NEXT: mov b6, v2.b[12]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: mov b7, v2.b[13]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: mov b16, v2.b[14]
+; CHECK-GI-NEXT: mov b23, v2.b[15]
+; CHECK-GI-NEXT: ldp q3, q2, [x0]
+; CHECK-GI-NEXT: fmov w26, s19
+; CHECK-GI-NEXT: fmov w19, s4
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov b4, v3.b[2]
+; CHECK-GI-NEXT: mov b5, v3.b[1]
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v1.b[1]
+; CHECK-GI-NEXT: fmov w22, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov b18, v1.b[2]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: uxtb w24, w24
+; CHECK-GI-NEXT: mov b19, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s6
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: mov b21, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s7
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b7, v1.b[5]
+; CHECK-GI-NEXT: mov b6, v3.b[3]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w12, s23
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: mov b23, v1.b[7]
+; CHECK-GI-NEXT: mov b20, v3.b[4]
+; CHECK-GI-NEXT: fmov w28, s4
+; CHECK-GI-NEXT: fmov s4, w26
+; CHECK-GI-NEXT: fmov w14, s16
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: fmov w5, s18
+; CHECK-GI-NEXT: uxtb w12, w12
+; CHECK-GI-NEXT: fmov w4, s19
+; CHECK-GI-NEXT: mov b19, v3.b[5]
+; CHECK-GI-NEXT: uxtb w28, w28
+; CHECK-GI-NEXT: fmov w3, s21
+; CHECK-GI-NEXT: mov b18, v3.b[6]
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w1, s7
+; CHECK-GI-NEXT: mov b16, v3.b[7]
+; CHECK-GI-NEXT: fmov w0, s22
+; CHECK-GI-NEXT: mov b17, v3.b[8]
+; CHECK-GI-NEXT: fmov w17, s23
+; CHECK-GI-NEXT: mov b7, v3.b[9]
+; CHECK-GI-NEXT: fmov w30, s5
+; CHECK-GI-NEXT: mov b5, v3.b[10]
+; CHECK-GI-NEXT: mov b21, v3.b[11]
+; CHECK-GI-NEXT: fmov w25, s6
+; CHECK-GI-NEXT: mov b6, v3.b[12]
+; CHECK-GI-NEXT: fmov w23, s20
+; CHECK-GI-NEXT: mov b20, v3.b[13]
+; CHECK-GI-NEXT: mov b22, v3.b[14]
+; CHECK-GI-NEXT: fmov w6, s3
+; CHECK-GI-NEXT: mov b23, v3.b[15]
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w21, s19
+; CHECK-GI-NEXT: mov b19, v2.b[1]
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: fmov w24, s6
+; CHECK-GI-NEXT: fmov w7, s16
+; CHECK-GI-NEXT: mov b16, v2.b[2]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w19
+; CHECK-GI-NEXT: uxtb w19, w29
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: uxtb w22, w6
+; CHECK-GI-NEXT: fmov s5, w8
+; CHECK-GI-NEXT: fmov w10, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: fmov w20, s18
+; CHECK-GI-NEXT: uxtb w29, w29
+; CHECK-GI-NEXT: fmov s6, w22
+; CHECK-GI-NEXT: fmov w22, s2
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v5.h[1], w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v2.b[3]
+; CHECK-GI-NEXT: mov v3.h[2], w19
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov v6.h[1], w27
+; CHECK-GI-NEXT: fmov w27, s19
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: fmov s16, w22
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: uxtb w5, w25
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: mov v3.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w4
+; CHECK-GI-NEXT: mov v6.h[2], w28
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: mov v16.h[1], w27
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov b20, v2.b[5]
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov w22, s17
+; CHECK-GI-NEXT: mov v5.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w2
+; CHECK-GI-NEXT: mov v6.h[3], w5
+; CHECK-GI-NEXT: mov b21, v2.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w18
+; CHECK-GI-NEXT: uxtb w18, w23
+; CHECK-GI-NEXT: mov v3.h[4], w13
+; CHECK-GI-NEXT: uxtb w13, w24
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w3
+; CHECK-GI-NEXT: mov v6.h[4], w18
+; CHECK-GI-NEXT: ldr w18, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w16
+; CHECK-GI-NEXT: uxtb w16, w22
+; CHECK-GI-NEXT: mov v5.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w15
+; CHECK-GI-NEXT: uxtb w18, w18
+; CHECK-GI-NEXT: mov v4.h[4], w9
+; CHECK-GI-NEXT: uxtb w9, w21
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[4], w16
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w1
+; CHECK-GI-NEXT: mov v3.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: fmov w19, s22
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: mov v5.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w0
+; CHECK-GI-NEXT: mov v4.h[5], w10
+; CHECK-GI-NEXT: uxtb w10, w20
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: mov v16.h[5], w11
+; CHECK-GI-NEXT: uxtb w8, w14
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v6.h[6], w10
+; CHECK-GI-NEXT: uxtb w10, w19
+; CHECK-GI-NEXT: fmov w6, s23
+; CHECK-GI-NEXT: mov v5.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: mov v3.h[6], w18
+; CHECK-GI-NEXT: mov v4.h[6], w8
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov v7.h[6], w10
+; CHECK-GI-NEXT: mov v16.h[6], w11
+; CHECK-GI-NEXT: uxtb w10, w6
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v6.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w17
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[7], w13
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[7], w12
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[7], w10
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: umov w8, v1.b[8]
+; CHECK-GI-NEXT: umov w9, v2.b[8]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: mul w15, w8, w9
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: umov w16, v3.h[0]
+; CHECK-GI-NEXT: umov w18, v3.h[4]
+; CHECK-GI-NEXT: umov w17, v3.h[1]
+; CHECK-GI-NEXT: umov w1, v2.h[0]
+; CHECK-GI-NEXT: umov w3, v2.h[4]
+; CHECK-GI-NEXT: umov w0, v3.h[5]
+; CHECK-GI-NEXT: umov w5, v1.h[0]
+; CHECK-GI-NEXT: umov w7, v1.h[4]
+; CHECK-GI-NEXT: umov w2, v2.h[1]
+; CHECK-GI-NEXT: umov w4, v2.h[5]
+; CHECK-GI-NEXT: umov w6, v1.h[1]
+; CHECK-GI-NEXT: umov w19, v1.h[5]
+; CHECK-GI-NEXT: umov w10, v3.h[2]
+; CHECK-GI-NEXT: umov w8, v3.h[3]
+; CHECK-GI-NEXT: umov w11, v3.h[6]
+; CHECK-GI-NEXT: umov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w16
+; CHECK-GI-NEXT: fmov s4, w18
+; CHECK-GI-NEXT: fmov s5, w1
+; CHECK-GI-NEXT: fmov s6, w3
+; CHECK-GI-NEXT: fmov s7, w5
+; CHECK-GI-NEXT: fmov s16, w7
+; CHECK-GI-NEXT: fmov s17, w15
+; CHECK-GI-NEXT: umov w12, v2.h[2]
+; CHECK-GI-NEXT: umov w13, v2.h[6]
+; CHECK-GI-NEXT: umov w14, v1.h[2]
+; CHECK-GI-NEXT: umov w16, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mov v4.s[1], w0
+; CHECK-GI-NEXT: mov v5.s[1], w2
+; CHECK-GI-NEXT: mov v6.s[1], w4
+; CHECK-GI-NEXT: mov v7.s[1], w6
+; CHECK-GI-NEXT: mov v16.s[1], w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: umov w15, v2.h[3]
+; CHECK-GI-NEXT: umov w17, v2.h[7]
+; CHECK-GI-NEXT: umov w18, v1.h[3]
+; CHECK-GI-NEXT: umov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w16
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w15
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2580,11 +2809,14 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2593,132 +2825,283 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: smov w9, v1.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[5]
-; CHECK-GI-NEXT: smov w18, v1.b[0]
-; CHECK-GI-NEXT: smov w0, v1.b[12]
-; CHECK-GI-NEXT: smov w3, v7.b[4]
-; CHECK-GI-NEXT: smov w12, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v1.b[6]
-; CHECK-GI-NEXT: smov w1, v1.b[13]
-; CHECK-GI-NEXT: smov w4, v7.b[5]
-; CHECK-GI-NEXT: smov w15, v1.b[2]
-; CHECK-GI-NEXT: smov w8, v1.b[3]
-; CHECK-GI-NEXT: smov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: smov w14, v1.b[8]
-; CHECK-GI-NEXT: smov w17, v1.b[9]
-; CHECK-GI-NEXT: smov w10, v1.b[10]
-; CHECK-GI-NEXT: smov w9, v1.b[11]
-; CHECK-GI-NEXT: smov w5, v1.b[14]
-; CHECK-GI-NEXT: smov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: smov w7, v7.b[1]
-; CHECK-GI-NEXT: smov w18, v7.b[6]
-; CHECK-GI-NEXT: smov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: smov w12, v7.b[3]
-; CHECK-GI-NEXT: smov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: smov w13, v7.b[2]
-; CHECK-GI-NEXT: smov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: smov w23, v16.b[12]
-; CHECK-GI-NEXT: smov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: smov w6, v16.b[0]
-; CHECK-GI-NEXT: smov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: smov w16, v16.b[2]
-; CHECK-GI-NEXT: smov w15, v16.b[3]
-; CHECK-GI-NEXT: smov w22, v16.b[5]
-; CHECK-GI-NEXT: smov w5, v16.b[6]
-; CHECK-GI-NEXT: smov w18, v16.b[7]
-; CHECK-GI-NEXT: smov w19, v16.b[8]
-; CHECK-GI-NEXT: smov w7, v16.b[9]
-; CHECK-GI-NEXT: smov w24, v16.b[13]
-; CHECK-GI-NEXT: smov w1, v16.b[10]
-; CHECK-GI-NEXT: smov w17, v16.b[11]
-; CHECK-GI-NEXT: smov w20, v16.b[14]
-; CHECK-GI-NEXT: smov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: smov w21, v3.b[8]
-; CHECK-GI-NEXT: smov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: smov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: smov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: smov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: smov w24, v3.b[2]
-; CHECK-GI-NEXT: smov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: smov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[15]
+; CHECK-GI-NEXT: mov b3, v2.b[1]
+; CHECK-GI-NEXT: fmov w19, s2
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: fmov w6, s5
+; CHECK-GI-NEXT: mov b5, v2.b[10]
+; CHECK-GI-NEXT: fmov w14, s6
+; CHECK-GI-NEXT: mov b6, v2.b[11]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: mov b7, v2.b[12]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: mov b16, v2.b[13]
+; CHECK-GI-NEXT: mov b18, v1.b[1]
+; CHECK-GI-NEXT: sxtb w6, w6
+; CHECK-GI-NEXT: mov b17, v2.b[14]
+; CHECK-GI-NEXT: ldp q4, q2, [x0]
+; CHECK-GI-NEXT: fmov w25, s19
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: fmov w22, s5
+; CHECK-GI-NEXT: mov b5, v1.b[2]
+; CHECK-GI-NEXT: fmov w0, s6
+; CHECK-GI-NEXT: sxtb w14, w14
+; CHECK-GI-NEXT: mov b20, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s7
+; CHECK-GI-NEXT: mov b7, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s16
+; CHECK-GI-NEXT: sxtb w25, w25
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov b16, v1.b[5]
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: sxtb w22, w22
+; CHECK-GI-NEXT: mov b6, v4.b[2]
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w0, w0
+; CHECK-GI-NEXT: mov b21, v1.b[7]
+; CHECK-GI-NEXT: mov b18, v4.b[4]
+; CHECK-GI-NEXT: fmov w7, s3
+; CHECK-GI-NEXT: mov b3, v4.b[1]
+; CHECK-GI-NEXT: fmov w12, s17
+; CHECK-GI-NEXT: fmov w5, s5
+; CHECK-GI-NEXT: mov b19, v4.b[3]
+; CHECK-GI-NEXT: fmov w4, s20
+; CHECK-GI-NEXT: fmov w3, s7
+; CHECK-GI-NEXT: sxtb w29, w7
+; CHECK-GI-NEXT: mov b17, v4.b[5]
+; CHECK-GI-NEXT: fmov w1, s16
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov b16, v4.b[6]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: mov b7, v4.b[7]
+; CHECK-GI-NEXT: fmov w17, s21
+; CHECK-GI-NEXT: mov b5, v4.b[8]
+; CHECK-GI-NEXT: mov b20, v4.b[9]
+; CHECK-GI-NEXT: fmov w27, s6
+; CHECK-GI-NEXT: mov b6, v4.b[10]
+; CHECK-GI-NEXT: mov b21, v4.b[11]
+; CHECK-GI-NEXT: fmov w21, s18
+; CHECK-GI-NEXT: mov b18, v4.b[12]
+; CHECK-GI-NEXT: mov b22, v4.b[13]
+; CHECK-GI-NEXT: mov b23, v4.b[14]
+; CHECK-GI-NEXT: fmov w10, s4
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v4.b[15]
+; CHECK-GI-NEXT: fmov s4, w25
+; CHECK-GI-NEXT: fmov w30, s3
+; CHECK-GI-NEXT: fmov s3, w28
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov w7, s7
+; CHECK-GI-NEXT: mov b7, v2.b[1]
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s1
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[1], w29
+; CHECK-GI-NEXT: fmov w29, s6
+; CHECK-GI-NEXT: fmov s6, w10
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w19, s16
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b16, v2.b[3]
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b19, v2.b[2]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov s5, w24
+; CHECK-GI-NEXT: sxtb w24, w30
+; CHECK-GI-NEXT: mov v3.h[2], w6
+; CHECK-GI-NEXT: sxtb w6, w26
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: sxtb w23, w23
+; CHECK-GI-NEXT: mov v6.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: mov v5.h[1], w6
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: fmov w20, s17
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov v3.h[3], w14
+; CHECK-GI-NEXT: sxtb w14, w2
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v7.h[1], w8
+; CHECK-GI-NEXT: fmov w8, s16
+; CHECK-GI-NEXT: fmov s16, w10
+; CHECK-GI-NEXT: mov v6.h[2], w27
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: mov v4.h[3], w0
+; CHECK-GI-NEXT: sxtb w0, w4
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b18, v2.b[5]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: mov v16.h[1], w24
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov v3.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w25
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v6.h[3], w23
+; CHECK-GI-NEXT: mov v5.h[3], w0
+; CHECK-GI-NEXT: fmov w26, s22
+; CHECK-GI-NEXT: mov b19, v2.b[6]
+; CHECK-GI-NEXT: fmov w27, s18
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w28
+; CHECK-GI-NEXT: fmov w22, s23
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: fmov w6, s24
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w11
+; CHECK-GI-NEXT: sxtb w11, w21
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[3], w8
+; CHECK-GI-NEXT: sxtb w8, w16
+; CHECK-GI-NEXT: sxtb w16, w3
+; CHECK-GI-NEXT: mov v6.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v3.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w15
+; CHECK-GI-NEXT: sxtb w15, w27
+; CHECK-GI-NEXT: mov v7.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w1
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.h[4], w16
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w12
+; CHECK-GI-NEXT: sxtb w12, w18
+; CHECK-GI-NEXT: mov v4.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w19
+; CHECK-GI-NEXT: mov v5.h[5], w14
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v16.h[5], w15
+; CHECK-GI-NEXT: mov v3.h[6], w11
+; CHECK-GI-NEXT: sxtb w11, w22
+; CHECK-GI-NEXT: mov v6.h[6], w9
+; CHECK-GI-NEXT: sxtb w9, w13
+; CHECK-GI-NEXT: sxtb w13, w24
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: fmov w11, s17
+; CHECK-GI-NEXT: mov v4.h[6], w10
+; CHECK-GI-NEXT: sxtb w10, w7
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: mov v16.h[6], w13
+; CHECK-GI-NEXT: mov v3.h[7], w8
+; CHECK-GI-NEXT: sxtb w8, w6
+; CHECK-GI-NEXT: smov w12, v1.b[8]
+; CHECK-GI-NEXT: mov v6.h[7], w10
+; CHECK-GI-NEXT: sxtb w10, w17
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[7], w9
+; CHECK-GI-NEXT: mov v7.h[7], w8
+; CHECK-GI-NEXT: smov w8, v2.b[8]
+; CHECK-GI-NEXT: mov v5.h[7], w10
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[7], w11
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul w16, w12, w8
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w0, v3.h[4]
+; CHECK-GI-NEXT: sxth w16, w16
+; CHECK-GI-NEXT: smov w2, v2.h[0]
+; CHECK-GI-NEXT: smov w4, v2.h[4]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w1, v3.h[5]
+; CHECK-GI-NEXT: smov w3, v2.h[1]
+; CHECK-GI-NEXT: smov w5, v2.h[5]
+; CHECK-GI-NEXT: smov w6, v1.h[0]
+; CHECK-GI-NEXT: smov w19, v1.h[4]
+; CHECK-GI-NEXT: smov w7, v1.h[1]
+; CHECK-GI-NEXT: smov w20, v1.h[5]
+; CHECK-GI-NEXT: smov w10, v3.h[2]
+; CHECK-GI-NEXT: smov w8, v3.h[3]
+; CHECK-GI-NEXT: smov w11, v3.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w17
+; CHECK-GI-NEXT: fmov s4, w0
+; CHECK-GI-NEXT: fmov s5, w2
+; CHECK-GI-NEXT: fmov s6, w4
+; CHECK-GI-NEXT: fmov s7, w6
+; CHECK-GI-NEXT: fmov s16, w19
+; CHECK-GI-NEXT: fmov s17, w16
+; CHECK-GI-NEXT: smov w12, v2.h[2]
+; CHECK-GI-NEXT: smov w13, v2.h[6]
+; CHECK-GI-NEXT: smov w14, v1.h[2]
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w18
+; CHECK-GI-NEXT: mov v4.s[1], w1
+; CHECK-GI-NEXT: mov v5.s[1], w3
+; CHECK-GI-NEXT: mov v6.s[1], w5
+; CHECK-GI-NEXT: mov v7.s[1], w7
+; CHECK-GI-NEXT: mov v16.s[1], w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: smov w16, v2.h[3]
+; CHECK-GI-NEXT: smov w17, v2.h[7]
+; CHECK-GI-NEXT: smov w18, v1.h[3]
+; CHECK-GI-NEXT: smov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w15
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w16
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2948,349 +3331,535 @@ define i32 @test_sdot_v25i8_double(<25 x i8> %a, <25 x i8> %b, <25 x i8> %c, <25
;
; CHECK-GI-LABEL: test_sdot_v25i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp d11, d10, [sp, #-48]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #32] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
; CHECK-GI-NEXT: .cfi_offset w29, -16
-; CHECK-GI-NEXT: .cfi_offset b8, -24
-; CHECK-GI-NEXT: .cfi_offset b9, -32
-; CHECK-GI-NEXT: .cfi_offset b10, -40
-; CHECK-GI-NEXT: .cfi_offset b11, -48
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w10, w4
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w11, w2
-; CHECK-GI-NEXT: sxtb w13, w6
-; CHECK-GI-NEXT: ldr w12, [sp, #72]
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #16]
+; CHECK-GI-NEXT: lsl w10, w1, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #24]
+; CHECK-GI-NEXT: lsl w12, w4, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #56]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #64]
; CHECK-GI-NEXT: fmov s2, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #48]
-; CHECK-GI-NEXT: fmov s4, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #80]
-; CHECK-GI-NEXT: ldr w14, [sp, #128]
-; CHECK-GI-NEXT: ldr w15, [sp, #152]
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #112]
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #32]
+; CHECK-GI-NEXT: sbfx w9, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v4.h[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #152]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: mov v2.s[1], w9
-; CHECK-GI-NEXT: sxtb w9, w5
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s3, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #88]
-; CHECK-GI-NEXT: ldr x29, [sp, #32] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v4.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #56]
-; CHECK-GI-NEXT: fmov s5, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w3
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #40]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #64]
-; CHECK-GI-NEXT: mov v5.s[1], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #104]
+; CHECK-GI-NEXT: mov v4.h[2], w10
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #160]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #96]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v4.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #120]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #112]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w11
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #48]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s3, w8
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #80]
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #168]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], w11
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: mov v5.s[2], w9
-; CHECK-GI-NEXT: sxtb w9, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #144]
-; CHECK-GI-NEXT: ldr w10, [sp, #136]
-; CHECK-GI-NEXT: fmov s6, w11
-; CHECK-GI-NEXT: sxtb w11, w7
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[4], w12
+; CHECK-GI-NEXT: lsl w12, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v3.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #88]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: lsl w8, w10, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #176]
+; CHECK-GI-NEXT: mov v2.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: fmov s6, w9
+; CHECK-GI-NEXT: sbfx w15, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w10, #8
+; CHECK-GI-NEXT: mov v3.h[2], w11
+; CHECK-GI-NEXT: sbfx w11, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #96]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[5], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #224]
+; CHECK-GI-NEXT: mov v6.h[1], w15
+; CHECK-GI-NEXT: mov v2.h[6], w11
+; CHECK-GI-NEXT: lsl w15, w7, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w12, [sp, #104]
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #216]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v2.h[7], w15
+; CHECK-GI-NEXT: lsl w15, w9, #8
+; CHECK-GI-NEXT: mov v4.h[6], w14
+; CHECK-GI-NEXT: mov v6.h[2], w10
+; CHECK-GI-NEXT: lsl w10, w13, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #288]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v3.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #192]
+; CHECK-GI-NEXT: fmov s5, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #232]
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v6.h[3], w12
+; CHECK-GI-NEXT: ldr w8, [sp, #72]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #184]
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: mov v6.s[1], w9
-; CHECK-GI-NEXT: fmov s7, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w11, [sp, #160]
-; CHECK-GI-NEXT: mov v7.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #224]
-; CHECK-GI-NEXT: mov v3.s[3], w9
-; CHECK-GI-NEXT: mov v6.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #192]
-; CHECK-GI-NEXT: fmov s16, w8
-; CHECK-GI-NEXT: fmov s18, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #168]
-; CHECK-GI-NEXT: ldr w13, [sp, #208]
-; CHECK-GI-NEXT: mov v7.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #256]
-; CHECK-GI-NEXT: ldr w8, [sp, #176]
-; CHECK-GI-NEXT: mov v16.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #200]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #232]
-; CHECK-GI-NEXT: mov v6.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #248]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #240]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v18.s[2], w14
-; CHECK-GI-NEXT: fmov s17, w10
+; CHECK-GI-NEXT: mov v5.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #280]
+; CHECK-GI-NEXT: sbfx w15, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #240]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v3.h[5], w15
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #296]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #200]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v6.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #304]
+; CHECK-GI-NEXT: ldr w11, [sp, #128]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v5.h[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #208]
+; CHECK-GI-NEXT: mov v7.h[1], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #248]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v3.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w14, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v7.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #256]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #320]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v5.h[4], w13
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #312]
+; CHECK-GI-NEXT: mov v3.h[7], w10
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v4.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[3], w14
; CHECK-GI-NEXT: ldr w14, [sp, #264]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #288]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w15, [sp, #392]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #280]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #312]
-; CHECK-GI-NEXT: mov v16.s[3], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #296]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w9, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #136]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v5.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mul v16.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v6.h[6], w11
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #328]
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #336]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: smov w9, v16.h[0]
+; CHECK-GI-NEXT: smov w15, v16.h[4]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: smov w17, v16.h[5]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mov v17.s[2], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #320]
-; CHECK-GI-NEXT: fmov s20, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #344]
-; CHECK-GI-NEXT: fmov s19, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w12, [sp, #304]
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v18.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
-; CHECK-GI-NEXT: mov v19.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #328]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s21, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #336]
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v16.h[1]
+; CHECK-GI-NEXT: mov v7.h[5], w14
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: ldr w14, [sp, #344]
+; CHECK-GI-NEXT: ldr w8, [sp, #352]
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: fmov s3, w15
+; CHECK-GI-NEXT: lsl w9, w12, #8
+; CHECK-GI-NEXT: sbfx w12, w16, #8, #8
; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v17.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #376]
-; CHECK-GI-NEXT: mov v20.s[2], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #368]
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v19.s[2], w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w14, [sp, #384]
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v20.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: mul w10, w8, w10
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: mov v19.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #416]
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: ldr w9, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #432]
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #448]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v21.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #424]
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #480]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v23.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #456]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s24, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #440]
-; CHECK-GI-NEXT: mov v25.s[1], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: smov w15, v16.h[3]
+; CHECK-GI-NEXT: mov v2.s[1], w13
+; CHECK-GI-NEXT: smov w13, v16.h[2]
+; CHECK-GI-NEXT: mov v6.h[7], w12
+; CHECK-GI-NEXT: smov w12, v16.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mul v18.8h, v4.8h, v5.8h
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w9, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #368]
+; CHECK-GI-NEXT: mov v2.s[2], w13
+; CHECK-GI-NEXT: smov w13, v16.h[7]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v3.s[2], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: mul w8, w10, w14
+; CHECK-GI-NEXT: smov w10, v18.h[0]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #376]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w12, v18.h[1]
+; CHECK-GI-NEXT: mov v7.h[7], w16
+; CHECK-GI-NEXT: mov v2.s[3], w15
+; CHECK-GI-NEXT: smov w15, v18.h[4]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v3.s[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #416]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: mov v16.h[1], w11
+; CHECK-GI-NEXT: ldr w10, [sp, #424]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #384]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: fmov s5, w15
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #432]
+; CHECK-GI-NEXT: mov v4.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[5]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v7.8h, v6.8h, v7.8h
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #392]
+; CHECK-GI-NEXT: ldr w13, [sp, #400]
+; CHECK-GI-NEXT: mov v5.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: mov v16.h[3], w14
+; CHECK-GI-NEXT: ldr w10, [sp, #440]
+; CHECK-GI-NEXT: smov w14, v18.h[6]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #456]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sxth w8, w8
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v4.s[2], w12
+; CHECK-GI-NEXT: smov w12, v18.h[3]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v17.h[2], w15
+; CHECK-GI-NEXT: mov v16.h[4], w11
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[7]
+; CHECK-GI-NEXT: ldr w15, [sp, #448]
+; CHECK-GI-NEXT: ldr w11, [sp, #408]
+; CHECK-GI-NEXT: mov v4.s[3], w12
+; CHECK-GI-NEXT: smov w12, v7.h[0]
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #480]
+; CHECK-GI-NEXT: mov v16.h[5], w9
+; CHECK-GI-NEXT: lsl w9, w13, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: mov v5.s[3], w14
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: smov w14, v7.h[1]
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: fmov s6, w12
; CHECK-GI-NEXT: ldr w12, [sp, #488]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: ldr w15, [sp, #504]
-; CHECK-GI-NEXT: ldr w11, [sp, #472]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v24.s[1], w14
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v17.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #496]
+; CHECK-GI-NEXT: fmov s18, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #552]
+; CHECK-GI-NEXT: mov v6.s[1], w14
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
; CHECK-GI-NEXT: ldr w14, [sp, #464]
-; CHECK-GI-NEXT: mov v23.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w8, [sp, #408]
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #496]
-; CHECK-GI-NEXT: mov v25.s[2], w10
+; CHECK-GI-NEXT: mov v16.h[6], w9
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v18.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v17.h[5], w15
+; CHECK-GI-NEXT: sbfx w15, w10, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
; CHECK-GI-NEXT: ldr w10, [sp, #512]
-; CHECK-GI-NEXT: sxtb w9, w14
-; CHECK-GI-NEXT: ldr w14, [sp, #520]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v22.s[1], wzr
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #528]
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w13, w15
-; CHECK-GI-NEXT: fmov s27, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w15, [sp, #552]
-; CHECK-GI-NEXT: mov v25.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #544]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #560]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s28, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #616]
-; CHECK-GI-NEXT: mov v27.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #592]
-; CHECK-GI-NEXT: ldr w15, [sp, #568]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w8, [sp, #536]
-; CHECK-GI-NEXT: ldr w10, [sp, #576]
-; CHECK-GI-NEXT: mov v28.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s30, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #600]
-; CHECK-GI-NEXT: mov v27.s[2], w9
-; CHECK-GI-NEXT: mov v29.s[1], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #608]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #632]
-; CHECK-GI-NEXT: mov v28.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #640]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v29.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #664]
-; CHECK-GI-NEXT: mov v30.s[2], w13
-; CHECK-GI-NEXT: mov v28.s[3], w14
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: ldr w15, [sp, #616]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v16.h[7], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #504]
+; CHECK-GI-NEXT: mov v18.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v17.h[6], w14
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #576]
+; CHECK-GI-NEXT: mov v19.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #624]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s20, w14
; CHECK-GI-NEXT: ldr w14, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #656]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v29.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #688]
-; CHECK-GI-NEXT: fmov s31, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: mov v30.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s8, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w14, [sp, #712]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[1], w13
+; CHECK-GI-NEXT: mov v18.h[3], w16
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v19.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v20.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #688]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v18.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #584]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: fmov s21, w14
+; CHECK-GI-NEXT: ldr w15, [sp, #640]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v20.h[2], w13
; CHECK-GI-NEXT: ldr w13, [sp, #696]
-; CHECK-GI-NEXT: mov v8.s[1], w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w9, [sp, #720]
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #776]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s10, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v22.s[2], wzr
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #704]
-; CHECK-GI-NEXT: mov v9.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #760]
-; CHECK-GI-NEXT: mov v8.s[2], w13
-; CHECK-GI-NEXT: mul w10, w10, w11
-; CHECK-GI-NEXT: mov v10.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #728]
-; CHECK-GI-NEXT: sxtb w11, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v20.4s
-; CHECK-GI-NEXT: mul v7.4s, v7.4s, v21.4s
-; CHECK-GI-NEXT: mul v18.4s, v25.4s, v30.4s
-; CHECK-GI-NEXT: mov v22.s[3], wzr
-; CHECK-GI-NEXT: fmov s11, w10
-; CHECK-GI-NEXT: mov v9.s[2], w11
-; CHECK-GI-NEXT: ldr w10, [sp, #768]
-; CHECK-GI-NEXT: mov v8.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #672]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v21.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #592]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v19.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #704]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.h[3], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #648]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v21.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #600]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v19.h[5], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #656]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v21.h[3], w10
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #608]
+; CHECK-GI-NEXT: mov v20.h[4], w15
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #528]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: mov v19.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #720]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w16, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v20.h[5], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #728]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v19.h[7], w16
+; CHECK-GI-NEXT: ldr w9, [sp, #472]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v18.h[6], w14
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v21.h[5], w13
+; CHECK-GI-NEXT: ldr w15, [sp, #672]
+; CHECK-GI-NEXT: ldr w11, [sp, #536]
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v20.h[6], w14
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v19.8h, v16.8h, v19.8h
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v21.h[6], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v7.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w12, v7.h[4]
+; CHECK-GI-NEXT: mov v17.h[7], w9
+; CHECK-GI-NEXT: mov v20.h[7], w15
+; CHECK-GI-NEXT: smov w9, v7.h[5]
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w11, v19.h[4]
+; CHECK-GI-NEXT: ldr w15, [sp, #744]
+; CHECK-GI-NEXT: mov v21.h[7], w13
+; CHECK-GI-NEXT: mov v6.s[2], w14
+; CHECK-GI-NEXT: smov w14, v19.h[0]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w13, v19.h[5]
+; CHECK-GI-NEXT: smov w12, v19.h[1]
+; CHECK-GI-NEXT: mul v20.8h, v17.8h, v20.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #544]
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: mul v22.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: fmov s18, w11
+; CHECK-GI-NEXT: mov v16.s[1], w9
+; CHECK-GI-NEXT: fmov s17, w14
+; CHECK-GI-NEXT: smov w14, v7.h[6]
+; CHECK-GI-NEXT: smov w11, v19.h[2]
+; CHECK-GI-NEXT: smov w9, v7.h[3]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mla v5.4s, v3.4s, v17.4s
-; CHECK-GI-NEXT: mov v11.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: mov v9.s[3], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mla v7.4s, v6.4s, v19.4s
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v20.4s, v26.4s, v8.4s
-; CHECK-GI-NEXT: mla v18.4s, v23.4s, v29.4s
-; CHECK-GI-NEXT: mov v31.s[3], w9
-; CHECK-GI-NEXT: add v1.4s, v22.4s, v1.4s
-; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mov v11.s[2], wzr
-; CHECK-GI-NEXT: mov v10.s[3], w8
-; CHECK-GI-NEXT: mul v21.4s, v28.4s, v9.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v7.4s, v1.4s
-; CHECK-GI-NEXT: mla v20.4s, v24.4s, v31.4s
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: mla v21.4s, v27.4s, v10.4s
+; CHECK-GI-NEXT: fmov s21, w8
+; CHECK-GI-NEXT: mov v18.s[1], w13
+; CHECK-GI-NEXT: sxtb w13, w15
+; CHECK-GI-NEXT: smov w15, v20.h[0]
+; CHECK-GI-NEXT: mov v17.s[1], w12
+; CHECK-GI-NEXT: smov w8, v7.h[7]
+; CHECK-GI-NEXT: smov w12, v19.h[6]
+; CHECK-GI-NEXT: mov v16.s[2], w14
+; CHECK-GI-NEXT: smov w14, v20.h[1]
+; CHECK-GI-NEXT: mul w10, w10, w13
+; CHECK-GI-NEXT: smov w13, v20.h[4]
+; CHECK-GI-NEXT: smov w16, v20.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
+; CHECK-GI-NEXT: fmov s7, w15
+; CHECK-GI-NEXT: smov w15, v20.h[2]
+; CHECK-GI-NEXT: mov v6.s[3], w9
+; CHECK-GI-NEXT: mov v17.s[2], w11
+; CHECK-GI-NEXT: smov w11, v22.h[0]
+; CHECK-GI-NEXT: sxth w10, w10
+; CHECK-GI-NEXT: mov v18.s[2], w12
+; CHECK-GI-NEXT: smov w12, v22.h[1]
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v7.s[1], w14
+; CHECK-GI-NEXT: smov w14, v22.h[4]
+; CHECK-GI-NEXT: fmov s23, w13
+; CHECK-GI-NEXT: smov w13, v22.h[5]
+; CHECK-GI-NEXT: fmov s26, w10
+; CHECK-GI-NEXT: smov w10, v19.h[7]
+; CHECK-GI-NEXT: fmov s24, w11
+; CHECK-GI-NEXT: smov w11, v20.h[6]
+; CHECK-GI-NEXT: mov v21.s[2], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w16
+; CHECK-GI-NEXT: add v4.4s, v6.4s, v16.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: fmov s25, w14
+; CHECK-GI-NEXT: smov w14, v22.h[2]
+; CHECK-GI-NEXT: mov v26.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], w12
+; CHECK-GI-NEXT: smov w12, v19.h[3]
+; CHECK-GI-NEXT: mov v7.s[2], w15
+; CHECK-GI-NEXT: smov w15, v20.h[3]
+; CHECK-GI-NEXT: mov v18.s[3], w10
+; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: mov v25.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[6]
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: smov w11, v20.h[7]
+; CHECK-GI-NEXT: mov v26.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], w14
+; CHECK-GI-NEXT: smov w14, v22.h[3]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: mov v7.s[3], w15
+; CHECK-GI-NEXT: add v1.4s, v21.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[2], w13
+; CHECK-GI-NEXT: smov w13, v22.h[7]
+; CHECK-GI-NEXT: mov v23.s[3], w11
+; CHECK-GI-NEXT: mov v26.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], w14
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v18.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[3], w13
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v23.4s
+; CHECK-GI-NEXT: add v0.4s, v26.4s, v0.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: add v3.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v11.4s, v0.4s
+; CHECK-GI-NEXT: add v7.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: add v3.4s, v5.4s, v6.4s
; CHECK-GI-NEXT: addv s1, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v7.4s, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: ldp d11, d10, [sp], #48 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <25 x i8> %a to <25 x i32>
@@ -3972,197 +4541,412 @@ define i32 @test_udot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s6, wzr
; CHECK-GI-NEXT: fmov s0, wzr
; CHECK-GI-NEXT: fmov s1, wzr
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: umov w8, v21.b[0]
-; CHECK-GI-NEXT: umov w9, v21.b[4]
-; CHECK-GI-NEXT: umov w10, v21.b[1]
-; CHECK-GI-NEXT: umov w13, v21.b[8]
-; CHECK-GI-NEXT: umov w11, v21.b[5]
-; CHECK-GI-NEXT: umov w14, v21.b[9]
-; CHECK-GI-NEXT: umov w15, v25.b[0]
-; CHECK-GI-NEXT: umov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: mov b29, v7.b[13]
+; CHECK-GI-NEXT: mov b30, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[7]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w13, s19
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b17, v16.b[2]
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w23, s21
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v16.b[1]
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: fmov w5, s17
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b21, v16.b[5]
+; CHECK-GI-NEXT: fmov s17, w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: fmov w20, s22
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w16
+; CHECK-GI-NEXT: mov b19, v16.b[3]
+; CHECK-GI-NEXT: mov b22, v16.b[4]
+; CHECK-GI-NEXT: mov b20, v16.b[6]
+; CHECK-GI-NEXT: fmov w21, s27
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: fmov w24, s28
+; CHECK-GI-NEXT: mov b24, v16.b[8]
+; CHECK-GI-NEXT: fmov w22, s29
+; CHECK-GI-NEXT: mov b26, v16.b[9]
+; CHECK-GI-NEXT: fmov w4, s30
+; CHECK-GI-NEXT: uxtb w10, w21
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w13
+; CHECK-GI-NEXT: str s20, [sp] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b25, v16.b[10]
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b27, v16.b[11]
+; CHECK-GI-NEXT: mov b28, v16.b[12]
+; CHECK-GI-NEXT: mov b29, v16.b[13]
+; CHECK-GI-NEXT: mov b30, v16.b[14]
+; CHECK-GI-NEXT: fmov w26, s16
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w27
+; CHECK-GI-NEXT: mov b31, v16.b[15]
+; CHECK-GI-NEXT: ldp q18, q16, [x0]
+; CHECK-GI-NEXT: fmov w2, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov v17.h[2], w9
+; CHECK-GI-NEXT: fmov w14, s22
+; CHECK-GI-NEXT: fmov w3, s25
+; CHECK-GI-NEXT: fmov w15, s19
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: uxtb w8, w23
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: mov b25, v18.b[5]
+; CHECK-GI-NEXT: mov b23, v18.b[6]
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: uxtb w3, w3
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w24
+; CHECK-GI-NEXT: uxtb w24, w7
+; CHECK-GI-NEXT: mov b19, v18.b[3]
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w20
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v18.b[10]
+; CHECK-GI-NEXT: fmov w9, s22
+; CHECK-GI-NEXT: fmov w6, s26
+; CHECK-GI-NEXT: mov v17.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w25
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b26, v18.b[4]
+; CHECK-GI-NEXT: fmov w18, s28
+; CHECK-GI-NEXT: fmov w16, s29
+; CHECK-GI-NEXT: mov v7.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s18
+; CHECK-GI-NEXT: mov b24, v18.b[7]
+; CHECK-GI-NEXT: fmov w30, s21
+; CHECK-GI-NEXT: mov b20, v18.b[8]
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: uxtb w16, w16
+; CHECK-GI-NEXT: mov b28, v18.b[11]
+; CHECK-GI-NEXT: mov b29, v18.b[12]
+; CHECK-GI-NEXT: fmov w23, s25
+; CHECK-GI-NEXT: mov b25, v18.b[13]
+; CHECK-GI-NEXT: fmov w21, s23
+; CHECK-GI-NEXT: mov v7.h[7], w24
+; CHECK-GI-NEXT: uxtb w24, w8
+; CHECK-GI-NEXT: uxtb w8, w9
+; CHECK-GI-NEXT: uxtb w9, w29
+; CHECK-GI-NEXT: mov b23, v18.b[14]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: fmov s21, w24
+; CHECK-GI-NEXT: fmov s18, w26
+; CHECK-GI-NEXT: fmov w28, s19
+; CHECK-GI-NEXT: mov b19, v16.b[1]
+; CHECK-GI-NEXT: mov v17.h[5], w22
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov v21.h[1], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov v18.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w5
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: fmov w24, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w4, s22
+; CHECK-GI-NEXT: mov b22, v16.b[8]
+; CHECK-GI-NEXT: mov v17.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s19
+; CHECK-GI-NEXT: fmov s19, w19
+; CHECK-GI-NEXT: mov v21.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w28
+; CHECK-GI-NEXT: mov v18.h[2], w10
+; CHECK-GI-NEXT: uxtb w10, w6
+; CHECK-GI-NEXT: mov b27, v16.b[9]
+; CHECK-GI-NEXT: fmov w20, s24
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b24, v16.b[3]
+; CHECK-GI-NEXT: fmov w5, s20
+; CHECK-GI-NEXT: mov v19.h[1], w10
+; CHECK-GI-NEXT: fmov w10, s23
+; CHECK-GI-NEXT: fmov s23, w7
+; CHECK-GI-NEXT: mov v21.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w7, s22
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: mov b20, v16.b[10]
+; CHECK-GI-NEXT: fmov w22, s28
+; CHECK-GI-NEXT: fmov w25, s25
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b25, v16.b[5]
+; CHECK-GI-NEXT: fmov w29, s24
+; CHECK-GI-NEXT: fmov s22, w7
+; CHECK-GI-NEXT: mov v23.h[2], w27
+; CHECK-GI-NEXT: mov b24, v16.b[11]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: fmov w26, s29
+; CHECK-GI-NEXT: mov b26, v16.b[4]
+; CHECK-GI-NEXT: mov v19.h[2], w3
+; CHECK-GI-NEXT: uxtb w3, w29
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[1], w11
+; CHECK-GI-NEXT: uxtb w11, w15
+; CHECK-GI-NEXT: uxtb w15, w22
+; CHECK-GI-NEXT: uxtb w22, w23
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: fmov w6, s25
+; CHECK-GI-NEXT: mov v18.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: mov v23.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w17
+; CHECK-GI-NEXT: uxtb w17, w21
+; CHECK-GI-NEXT: mov b25, v16.b[12]
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v21.h[5], w22
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: uxtb w11, w14
+; CHECK-GI-NEXT: uxtb w14, w26
+; CHECK-GI-NEXT: mov v20.h[2], w5
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w19, s26
+; CHECK-GI-NEXT: mov v18.h[4], w11
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v23.h[4], w14
+; CHECK-GI-NEXT: uxtb w14, w25
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b26, v16.b[13]
+; CHECK-GI-NEXT: fmov w7, s25
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w18
+; CHECK-GI-NEXT: uxtb w18, w19
+; CHECK-GI-NEXT: mov v21.h[6], w17
+; CHECK-GI-NEXT: uxtb w17, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w2
+; CHECK-GI-NEXT: mov v20.h[3], w3
+; CHECK-GI-NEXT: mov v23.h[5], w14
+; CHECK-GI-NEXT: uxtb w14, w24
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w7
+; CHECK-GI-NEXT: fmov w8, s26
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v21.h[7], w17
+; CHECK-GI-NEXT: uxtb w17, w6
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w15, w15
+; CHECK-GI-NEXT: fmov w13, s30
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v20.h[4], w18
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v19.h[5], w16
+; CHECK-GI-NEXT: mov b27, v16.b[14]
+; CHECK-GI-NEXT: mul v24.8h, v7.8h, v21.8h
+; CHECK-GI-NEXT: mov v22.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov b7, v16.b[7]
+; CHECK-GI-NEXT: mov b16, v16.b[15]
+; CHECK-GI-NEXT: fmov w12, s31
+; CHECK-GI-NEXT: mov v17.h[7], w11
+; CHECK-GI-NEXT: uxtb w11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v20.h[5], w17
+; CHECK-GI-NEXT: mov v23.h[7], w8
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov v18.h[6], w15
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov v19.h[6], w11
+; CHECK-GI-NEXT: fmov w12, s16
+; CHECK-GI-NEXT: fmov w11, s7
; CHECK-GI-NEXT: fmov s4, wzr
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v20.h[6], w10
+; CHECK-GI-NEXT: umov w10, v24.h[0]
+; CHECK-GI-NEXT: mul v21.8h, v17.8h, v23.8h
+; CHECK-GI-NEXT: mov v18.h[7], w13
; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v19.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: umov w12, v24.h[4]
+; CHECK-GI-NEXT: mov v22.h[6], w9
+; CHECK-GI-NEXT: umov w9, v24.h[1]
+; CHECK-GI-NEXT: mov v20.h[7], w11
+; CHECK-GI-NEXT: umov w11, v24.h[5]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldrb w10, [x1, #32]
+; CHECK-GI-NEXT: umov w13, v21.h[0]
+; CHECK-GI-NEXT: umov w14, v21.h[1]
+; CHECK-GI-NEXT: umov w15, v21.h[4]
; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: umov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: umov w9, v21.b[3]
-; CHECK-GI-NEXT: umov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: umov w15, v25.b[4]
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: umov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: umov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: mov v22.h[7], w8
+; CHECK-GI-NEXT: umov w12, v24.h[6]
+; CHECK-GI-NEXT: umov w8, v24.h[2]
+; CHECK-GI-NEXT: mov v7.s[1], w9
+; CHECK-GI-NEXT: ldrb w9, [x0, #32]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mul v23.8h, v18.8h, v20.8h
+; CHECK-GI-NEXT: umov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v16.s[1], w11
+; CHECK-GI-NEXT: umov w11, v21.h[5]
+; CHECK-GI-NEXT: fmov s18, w15
+; CHECK-GI-NEXT: mul v19.8h, v19.8h, v22.8h
+; CHECK-GI-NEXT: umov w15, v21.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: mov v17.s[1], w14
+; CHECK-GI-NEXT: umov w14, v21.h[2]
+; CHECK-GI-NEXT: mov v7.s[2], w8
+; CHECK-GI-NEXT: mul w8, w10, w9
+; CHECK-GI-NEXT: umov w9, v23.h[0]
+; CHECK-GI-NEXT: umov w10, v23.h[1]
+; CHECK-GI-NEXT: mov v16.s[2], w12
+; CHECK-GI-NEXT: umov w12, v21.h[3]
+; CHECK-GI-NEXT: mov v18.s[1], w11
+; CHECK-GI-NEXT: umov w11, v23.h[4]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: umov w15, v25.b[13]
+; CHECK-GI-NEXT: mov v17.s[2], w14
+; CHECK-GI-NEXT: umov w14, v23.h[5]
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: umov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: umov w8, v21.b[11]
-; CHECK-GI-NEXT: umov w10, v21.b[14]
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: umov w9, v19.h[1]
; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: umov w11, v25.b[2]
+; CHECK-GI-NEXT: mov v16.s[3], w13
+; CHECK-GI-NEXT: umov w13, v19.h[0]
+; CHECK-GI-NEXT: mov v18.s[2], w15
+; CHECK-GI-NEXT: umov w15, v21.h[7]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: umov w11, v23.h[2]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: umov w12, v19.h[4]
+; CHECK-GI-NEXT: mov v20.s[1], w10
+; CHECK-GI-NEXT: umov w10, v23.h[3]
; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: umov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: umov w14, v21.b[15]
-; CHECK-GI-NEXT: umov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: umov w13, v25.b[12]
+; CHECK-GI-NEXT: umov w16, v24.h[3]
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: umov w13, v19.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: umov w14, v23.h[6]
+; CHECK-GI-NEXT: mov v18.s[3], w15
+; CHECK-GI-NEXT: umov w15, v19.h[2]
+; CHECK-GI-NEXT: mov v20.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[6]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: umov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: umov w8, v26.b[5]
-; CHECK-GI-NEXT: umov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v22.s[1], w9
+; CHECK-GI-NEXT: umov w9, v23.h[7]
+; CHECK-GI-NEXT: fmov s23, w12
+; CHECK-GI-NEXT: umov w12, v19.h[3]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: umov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: umov w9, v25.b[10]
-; CHECK-GI-NEXT: umov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: umov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: umov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: umov w12, v26.b[4]
-; CHECK-GI-NEXT: umov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: umov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: umov w13, v26.b[9]
+; CHECK-GI-NEXT: mov v21.s[2], w14
; CHECK-GI-NEXT: mov v2.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: umov w15, v25.b[15]
+; CHECK-GI-NEXT: mov v23.s[1], w13
; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[6]
-; CHECK-GI-NEXT: umov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: umov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: umov w10, v26.b[3]
; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: umov w15, v22.b[0]
-; CHECK-GI-NEXT: umov w8, v26.b[7]
+; CHECK-GI-NEXT: mov v22.s[2], w15
+; CHECK-GI-NEXT: mov v7.s[3], w16
+; CHECK-GI-NEXT: mov v20.s[3], w10
; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: umov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: umov w10, v22.b[1]
-; CHECK-GI-NEXT: umov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: umov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: umov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: umov w15, v22.b[12]
-; CHECK-GI-NEXT: umov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[13]
+; CHECK-GI-NEXT: mov v21.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: umov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: umov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: umov w15, v26.b[15]
-; CHECK-GI-NEXT: umov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: umov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrb w14, [x1, #32]
-; CHECK-GI-NEXT: umov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[7]
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: mov v22.s[3], w12
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: umov w9, v22.b[7]
-; CHECK-GI-NEXT: umov w11, v22.b[3]
; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v7.4s, v17.4s, v18.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: umov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: add v16.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: mov v23.s[3], w11
; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: umov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: add v17.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v0.4s, v19.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4359,197 +5143,412 @@ define i32 @test_sdot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
-; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
-; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: smov w8, v21.b[0]
-; CHECK-GI-NEXT: smov w9, v21.b[4]
-; CHECK-GI-NEXT: smov w10, v21.b[1]
-; CHECK-GI-NEXT: smov w13, v21.b[8]
-; CHECK-GI-NEXT: smov w11, v21.b[5]
-; CHECK-GI-NEXT: smov w14, v21.b[9]
-; CHECK-GI-NEXT: smov w15, v25.b[0]
-; CHECK-GI-NEXT: smov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: smov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: smov w9, v21.b[3]
-; CHECK-GI-NEXT: smov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: smov w15, v25.b[4]
-; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: smov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: smov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s6, wzr
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: fmov w14, s19
+; CHECK-GI-NEXT: mov b19, v7.b[13]
+; CHECK-GI-NEXT: mov b29, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b30, v16.b[1]
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w24, s21
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w22, s22
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov b17, v16.b[3]
+; CHECK-GI-NEXT: mov b21, v16.b[4]
+; CHECK-GI-NEXT: mov b18, v16.b[5]
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: fmov w25, s27
+; CHECK-GI-NEXT: mov b22, v16.b[7]
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w16
+; CHECK-GI-NEXT: mov b25, v16.b[8]
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b24, v16.b[9]
+; CHECK-GI-NEXT: fmov w5, s29
+; CHECK-GI-NEXT: mov b26, v16.b[10]
+; CHECK-GI-NEXT: mov b19, v16.b[11]
+; CHECK-GI-NEXT: fmov w6, s30
+; CHECK-GI-NEXT: mov b27, v16.b[12]
+; CHECK-GI-NEXT: mov b28, v16.b[13]
+; CHECK-GI-NEXT: mov b29, v16.b[14]
+; CHECK-GI-NEXT: sxtb w30, w23
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: fmov w20, s16
+; CHECK-GI-NEXT: mov b30, v16.b[15]
+; CHECK-GI-NEXT: fmov s16, w8
+; CHECK-GI-NEXT: sxtb w8, w12
+; CHECK-GI-NEXT: fmov w15, s17
+; CHECK-GI-NEXT: fmov w11, s18
+; CHECK-GI-NEXT: ldp q18, q17, [x0]
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w27
+; CHECK-GI-NEXT: fmov w18, s20
+; CHECK-GI-NEXT: sxtb w15, w15
+; CHECK-GI-NEXT: mov v16.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w25
+; CHECK-GI-NEXT: mov b20, v18.b[3]
+; CHECK-GI-NEXT: fmov w2, s22
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: sxtb w18, w18
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: fmov w3, s19
+; CHECK-GI-NEXT: mov b19, v18.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w24
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w28, s20
+; CHECK-GI-NEXT: mov b20, v18.b[11]
+; CHECK-GI-NEXT: fmov w8, s22
+; CHECK-GI-NEXT: mov b25, v18.b[8]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov v7.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w22
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: mov b24, v18.b[4]
+; CHECK-GI-NEXT: mov b23, v18.b[5]
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: sxtb w23, w26
+; CHECK-GI-NEXT: mov v7.h[6], w9
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: mov b19, v18.b[14]
+; CHECK-GI-NEXT: fmov w10, s25
+; CHECK-GI-NEXT: fmov w4, s26
+; CHECK-GI-NEXT: fmov w16, s28
+; CHECK-GI-NEXT: mov b26, v18.b[7]
+; CHECK-GI-NEXT: mov b28, v18.b[10]
+; CHECK-GI-NEXT: fmov w27, s24
+; CHECK-GI-NEXT: mov b24, v18.b[12]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v7.h[7], w7
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: sxtb w4, w4
+; CHECK-GI-NEXT: fmov s20, w23
+; CHECK-GI-NEXT: fmov w25, s23
+; CHECK-GI-NEXT: mov b23, v18.b[13]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: mov v16.h[5], w30
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov b21, v17.b[1]
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: sxtb w20, w6
+; CHECK-GI-NEXT: fmov w6, s19
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov b28, v17.b[8]
+; CHECK-GI-NEXT: fmov s18, w8
+; CHECK-GI-NEXT: sxtb w8, w21
+; CHECK-GI-NEXT: mov v16.h[6], w5
+; CHECK-GI-NEXT: fmov w5, s22
+; CHECK-GI-NEXT: fmov s22, w10
+; CHECK-GI-NEXT: sxtb w10, w27
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: mov v20.h[2], w29
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: sxtb w8, w28
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: sxtb w19, w9
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: mov b25, v17.b[2]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v17.b[9]
+; CHECK-GI-NEXT: mov v22.h[1], w19
+; CHECK-GI-NEXT: fmov w23, s23
+; CHECK-GI-NEXT: mov v20.h[3], w8
+; CHECK-GI-NEXT: mov b23, v17.b[6]
+; CHECK-GI-NEXT: fmov w30, s24
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v17.b[5]
+; CHECK-GI-NEXT: mov v18.h[1], w20
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: mov b25, v17.b[10]
+; CHECK-GI-NEXT: mov v19.h[1], w28
+; CHECK-GI-NEXT: sxtb w28, w29
+; CHECK-GI-NEXT: mov v22.h[2], w26
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov v20.h[4], w10
+; CHECK-GI-NEXT: fmov w10, s28
+; CHECK-GI-NEXT: fmov s21, w27
+; CHECK-GI-NEXT: sxtb w21, w21
+; CHECK-GI-NEXT: mov b27, v17.b[3]
+; CHECK-GI-NEXT: fmov w19, s23
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: fmov w22, s26
+; CHECK-GI-NEXT: mov b26, v17.b[4]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v21.h[1], w28
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b24, v17.b[11]
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v18.h[2], w18
+; CHECK-GI-NEXT: sxtb w18, w25
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: fmov w20, s27
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w9, s26
+; CHECK-GI-NEXT: mov b26, v17.b[12]
+; CHECK-GI-NEXT: sxtb w25, w27
+; CHECK-GI-NEXT: mov v20.h[5], w18
+; CHECK-GI-NEXT: sxtb w18, w3
+; CHECK-GI-NEXT: sxtb w3, w24
+; CHECK-GI-NEXT: mov v23.h[1], w26
+; CHECK-GI-NEXT: mov v21.h[2], w21
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v22.h[3], w7
+; CHECK-GI-NEXT: sxtb w7, w20
+; CHECK-GI-NEXT: mov v19.h[2], w4
+; CHECK-GI-NEXT: sxtb w4, w30
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[3], w15
+; CHECK-GI-NEXT: sxtb w20, w28
+; CHECK-GI-NEXT: sxtb w15, w17
+; CHECK-GI-NEXT: sxtb w17, w22
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v23.h[2], w25
+; CHECK-GI-NEXT: mov v20.h[6], w3
+; CHECK-GI-NEXT: mov v21.h[3], w7
+; CHECK-GI-NEXT: fmov w10, s26
+; CHECK-GI-NEXT: mov v22.h[4], w4
+; CHECK-GI-NEXT: mov v19.h[3], w18
+; CHECK-GI-NEXT: sxtb w18, w23
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b27, v17.b[13]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[3], w20
+; CHECK-GI-NEXT: mov v18.h[4], w13
+; CHECK-GI-NEXT: sxtb w13, w6
+; CHECK-GI-NEXT: mov v20.h[7], w17
+; CHECK-GI-NEXT: mov v21.h[4], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[5], w18
+; CHECK-GI-NEXT: mov b25, v17.b[14]
+; CHECK-GI-NEXT: fmov w26, s27
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: fmov w14, s29
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v23.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w11
+; CHECK-GI-NEXT: sxtb w11, w16
+; CHECK-GI-NEXT: mov v21.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w15, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v18.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v20.8h, v7.8h, v20.8h
+; CHECK-GI-NEXT: mov b7, v17.b[7]
+; CHECK-GI-NEXT: mov v22.h[6], w13
+; CHECK-GI-NEXT: sxtb w13, w5
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v19.h[5], w11
+; CHECK-GI-NEXT: sxtb w11, w2
+; CHECK-GI-NEXT: mov b17, v17.b[15]
+; CHECK-GI-NEXT: mov v18.h[6], w8
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: mov v23.h[5], w15
+; CHECK-GI-NEXT: mov v21.h[6], w10
+; CHECK-GI-NEXT: sxtb w14, w27
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: mov v22.h[7], w13
+; CHECK-GI-NEXT: fmov w12, s30
+; CHECK-GI-NEXT: mov v19.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: smov w10, v20.h[0]
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w13, v20.h[1]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: smov w11, v20.h[4]
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mul v22.8h, v16.8h, v22.8h
+; CHECK-GI-NEXT: smov w14, v20.h[3]
+; CHECK-GI-NEXT: mov v21.h[7], w8
+; CHECK-GI-NEXT: ldrsb w8, [x0, #32]
+; CHECK-GI-NEXT: mov v19.h[7], w12
+; CHECK-GI-NEXT: mov v23.h[7], w9
+; CHECK-GI-NEXT: ldrsb w9, [x1, #32]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: smov w10, v20.h[2]
+; CHECK-GI-NEXT: smov w12, v20.h[5]
+; CHECK-GI-NEXT: fmov s16, w11
+; CHECK-GI-NEXT: mul w9, w9, w8
+; CHECK-GI-NEXT: smov w15, v22.h[4]
+; CHECK-GI-NEXT: smov w17, v22.h[5]
+; CHECK-GI-NEXT: mul v24.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: mov v7.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[0]
+; CHECK-GI-NEXT: mul v18.8h, v19.8h, v23.8h
+; CHECK-GI-NEXT: smov w16, v22.h[1]
+; CHECK-GI-NEXT: smov w8, v20.h[7]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: mov v16.s[1], w12
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: smov w15, v22.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: smov w11, v24.h[0]
+; CHECK-GI-NEXT: mov v7.s[2], w10
+; CHECK-GI-NEXT: smov w10, v20.h[6]
+; CHECK-GI-NEXT: smov w12, v24.h[1]
+; CHECK-GI-NEXT: smov w0, v18.h[4]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mov v19.s[1], w17
+; CHECK-GI-NEXT: smov w17, v18.h[0]
+; CHECK-GI-NEXT: smov w18, v18.h[1]
+; CHECK-GI-NEXT: smov w13, v22.h[2]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: smov w15, v25.b[13]
+; CHECK-GI-NEXT: fmov s20, w11
+; CHECK-GI-NEXT: smov w11, v24.h[4]
+; CHECK-GI-NEXT: mov v7.s[3], w14
+; CHECK-GI-NEXT: smov w14, v24.h[5]
+; CHECK-GI-NEXT: mov v17.s[1], w16
+; CHECK-GI-NEXT: smov w16, v24.h[2]
+; CHECK-GI-NEXT: mov v19.s[2], w15
+; CHECK-GI-NEXT: smov w15, v18.h[5]
+; CHECK-GI-NEXT: fmov s23, w0
+; CHECK-GI-NEXT: mov v20.s[1], w12
+; CHECK-GI-NEXT: mov v16.s[2], w10
+; CHECK-GI-NEXT: smov w10, v22.h[3]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: smov w11, v22.h[7]
+; CHECK-GI-NEXT: fmov s22, w17
+; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: smov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: smov w8, v21.b[11]
-; CHECK-GI-NEXT: smov w10, v21.b[14]
-; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: smov w11, v25.b[2]
-; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: smov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: smov w14, v21.b[15]
-; CHECK-GI-NEXT: smov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: smov w13, v25.b[12]
-; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: smov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: smov w8, v26.b[5]
-; CHECK-GI-NEXT: smov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w15
+; CHECK-GI-NEXT: smov w15, v18.h[6]
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: smov w14, v24.h[6]
+; CHECK-GI-NEXT: mov v20.s[2], w16
+; CHECK-GI-NEXT: mov v22.s[1], w18
+; CHECK-GI-NEXT: smov w16, v18.h[2]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: smov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: smov w9, v25.b[10]
-; CHECK-GI-NEXT: smov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: smov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: smov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: smov w12, v26.b[4]
-; CHECK-GI-NEXT: smov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: smov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: smov w13, v26.b[9]
; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
+; CHECK-GI-NEXT: mov v6.s[2], wzr
; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: smov w15, v25.b[15]
-; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[6]
-; CHECK-GI-NEXT: smov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: smov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: smov w10, v26.b[3]
-; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: smov w15, v22.b[0]
-; CHECK-GI-NEXT: smov w8, v26.b[7]
-; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[3]
+; CHECK-GI-NEXT: smov w15, v18.h[7]
+; CHECK-GI-NEXT: fmov s18, w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[2], w13
+; CHECK-GI-NEXT: smov w12, v24.h[3]
+; CHECK-GI-NEXT: smov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v22.s[2], w16
+; CHECK-GI-NEXT: mov v0.s[2], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: smov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: smov w10, v22.b[1]
-; CHECK-GI-NEXT: smov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: smov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: smov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: smov w15, v22.b[12]
-; CHECK-GI-NEXT: smov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[13]
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: smov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: smov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: smov w15, v26.b[15]
-; CHECK-GI-NEXT: smov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: smov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrsb w14, [x1, #32]
-; CHECK-GI-NEXT: smov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrsb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: smov w9, v22.b[7]
-; CHECK-GI-NEXT: smov w11, v22.b[3]
-; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v17.s[3], w10
+; CHECK-GI-NEXT: mov v19.s[3], w11
+; CHECK-GI-NEXT: mov v20.s[3], w12
+; CHECK-GI-NEXT: mov v21.s[3], w13
+; CHECK-GI-NEXT: mov v22.s[3], w14
+; CHECK-GI-NEXT: mov v23.s[3], w15
+; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: smov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: smov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
+; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: add v4.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v19.4s
+; CHECK-GI-NEXT: add v6.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: add v7.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v18.s[3], wzr
+; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v0.4s, v18.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4845,13 +5844,12 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
;
; CHECK-GI-LABEL: test_sdot_v33i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub sp, sp, #96
-; CHECK-GI-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #80] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
; CHECK-GI-NEXT: .cfi_offset w29, -16
; CHECK-GI-NEXT: .cfi_offset b8, -24
; CHECK-GI-NEXT: .cfi_offset b9, -32
@@ -4861,508 +5859,762 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
; CHECK-GI-NEXT: .cfi_offset b13, -64
; CHECK-GI-NEXT: .cfi_offset b14, -72
; CHECK-GI-NEXT: .cfi_offset b15, -80
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w10, w2
-; CHECK-GI-NEXT: sxtb w11, w4
-; CHECK-GI-NEXT: sxtb w12, w5
-; CHECK-GI-NEXT: sxtb w13, w7
-; CHECK-GI-NEXT: fmov s28, w8
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #80]
+; CHECK-GI-NEXT: lsl w11, w1, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #88]
+; CHECK-GI-NEXT: ldr w13, [sp, #128]
+; CHECK-GI-NEXT: ldr w14, [sp, #136]
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s22, w12
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #152]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w16, w7, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: mov v22.h[1], w8
; CHECK-GI-NEXT: ldr w8, [sp, #96]
-; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: sxtb w11, w6
-; CHECK-GI-NEXT: ldr w14, [sp, #528]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s18, wzr
-; CHECK-GI-NEXT: fmov s20, wzr
-; CHECK-GI-NEXT: mov v28.s[1], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #176]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v23.h[1], w9
; CHECK-GI-NEXT: ldr w9, [sp, #104]
-; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-GI-NEXT: fmov s24, w8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #224]
+; CHECK-GI-NEXT: mov v22.h[2], w10
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w4, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v23.h[2], w8
; CHECK-GI-NEXT: ldr w8, [sp, #112]
-; CHECK-GI-NEXT: mov v25.s[1], w12
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #136]
-; CHECK-GI-NEXT: mov v18.s[1], wzr
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v20.s[1], wzr
; CHECK-GI-NEXT: fmov s19, wzr
-; CHECK-GI-NEXT: mov v28.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w3
-; CHECK-GI-NEXT: mov v24.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #128]
-; CHECK-GI-NEXT: mov v25.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #168]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s21, wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[2], wzr
-; CHECK-GI-NEXT: mov v28.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #160]
-; CHECK-GI-NEXT: mov v24.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #120]
-; CHECK-GI-NEXT: fmov s30, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #144]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #200]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[1], wzr
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: mov v30.s[1], w12
-; CHECK-GI-NEXT: ldr w10, [sp, #176]
-; CHECK-GI-NEXT: mov v24.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #224]
-; CHECK-GI-NEXT: ldr w12, [sp, #152]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v22.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #192]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #232]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s23, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #240]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], wzr
-; CHECK-GI-NEXT: mov v20.s[3], wzr
-; CHECK-GI-NEXT: mov v22.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #184]
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: mov v23.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #256]
-; CHECK-GI-NEXT: ldr w11, [sp, #208]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v30.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #264]
-; CHECK-GI-NEXT: mov v26.s[1], w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v22.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #296]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w10
-; CHECK-GI-NEXT: mov v23.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #248]
-; CHECK-GI-NEXT: mov v26.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #304]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: fmov s31, w9
-; CHECK-GI-NEXT: mov v29.s[1], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #312]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s16, wzr
-; CHECK-GI-NEXT: mov v31.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #328]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w11
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #336]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #368]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: fmov s0, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #320]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s12, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v29.s[3], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #376]
-; CHECK-GI-NEXT: mov v0.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #344]
-; CHECK-GI-NEXT: ldr w8, [sp, #288]
-; CHECK-GI-NEXT: mov v12.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #392]
-; CHECK-GI-NEXT: mov v31.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #424]
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #432]
-; CHECK-GI-NEXT: fmov s13, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v12.s[2], w9
-; CHECK-GI-NEXT: fmov s8, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w10, [sp, #440]
-; CHECK-GI-NEXT: ldr w11, [sp, #384]
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
+; CHECK-GI-NEXT: fmov s18, wzr
; CHECK-GI-NEXT: fmov s17, wzr
-; CHECK-GI-NEXT: mov v13.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #408]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w12, [sp, #456]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: mov v12.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #488]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v13.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #496]
-; CHECK-GI-NEXT: mov v0.s[3], w9
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #416]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w9, [sp, #464]
-; CHECK-GI-NEXT: fmov s14, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #504]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #448]
-; CHECK-GI-NEXT: mul v27.4s, v25.4s, v0.4s
-; CHECK-GI-NEXT: mov v13.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #560]
-; CHECK-GI-NEXT: sxtb w15, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #568]
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #520]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #472]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s10, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #552]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s15, w13
-; CHECK-GI-NEXT: mov v8.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #480]
-; CHECK-GI-NEXT: mov v10.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #576]
-; CHECK-GI-NEXT: mov v9.s[2], w15
-; CHECK-GI-NEXT: mul w8, w8, w10
-; CHECK-GI-NEXT: mov v15.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #512]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w13, [sp, #536]
-; CHECK-GI-NEXT: mla v27.4s, v28.4s, v31.4s
-; CHECK-GI-NEXT: mul v30.4s, v30.4s, v13.4s
-; CHECK-GI-NEXT: mov v10.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #592]
-; CHECK-GI-NEXT: fmov s25, w8
-; CHECK-GI-NEXT: mov v14.s[3], w9
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #600]
-; CHECK-GI-NEXT: mla v30.4s, v24.4s, v12.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v10.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #632]
-; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #656]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s28, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #688]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v15.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #544]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v0.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #664]
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #696]
-; CHECK-GI-NEXT: fmov s11, w8
-; CHECK-GI-NEXT: fmov s31, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #672]
-; CHECK-GI-NEXT: ldr w8, [sp, #616]
-; CHECK-GI-NEXT: mov v11.s[1], w9
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #608]
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #704]
-; CHECK-GI-NEXT: ldr w9, [sp, #640]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v24.4s, v26.4s, v14.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #712]
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: mov v31.s[2], w10
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: mov v28.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w10, [sp, #720]
-; CHECK-GI-NEXT: ldr w9, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v26.4s, v29.4s, v15.4s
-; CHECK-GI-NEXT: mla v24.4s, v22.4s, v8.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v0.s[3], w8
-; CHECK-GI-NEXT: mov v31.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #784]
-; CHECK-GI-NEXT: mov v28.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: fmov s13, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #792]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v11.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #760]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w8, [sp, #728]
-; CHECK-GI-NEXT: fmov s14, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s12, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #824]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mla v26.4s, v23.4s, v9.4s
-; CHECK-GI-NEXT: ldr w13, [sp, #984]
-; CHECK-GI-NEXT: mov v14.s[1], w10
-; CHECK-GI-NEXT: sxtb w10, w12
-; CHECK-GI-NEXT: mov v13.s[1], w8
-; CHECK-GI-NEXT: mov v12.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #832]
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: ldr w12, [sp, #768]
-; CHECK-GI-NEXT: ldr w10, [sp, #800]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[3], w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: fmov s20, wzr
; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: lsl w11, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: fmov s7, wzr
; CHECK-GI-NEXT: fmov s2, wzr
-; CHECK-GI-NEXT: mov v29.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #840]
-; CHECK-GI-NEXT: mov v13.s[2], w8
-; CHECK-GI-NEXT: mov v12.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #808]
-; CHECK-GI-NEXT: mov v14.s[2], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w8, [sp, #776]
-; CHECK-GI-NEXT: ldr w10, [sp, #848]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s5, wzr
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v23.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #160]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #168]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v29.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #856]
-; CHECK-GI-NEXT: mov v13.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #864]
-; CHECK-GI-NEXT: mov v14.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #888]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s7, wzr
-; CHECK-GI-NEXT: fmov s15, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #920]
-; CHECK-GI-NEXT: mov v12.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #872]
-; CHECK-GI-NEXT: mov v29.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #896]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s22, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #928]
-; CHECK-GI-NEXT: mov v15.s[1], w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #952]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w11, [sp, #904]
-; CHECK-GI-NEXT: mov v22.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #936]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mov v15.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #960]
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #880]
-; CHECK-GI-NEXT: fmov s23, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w9, [sp, #944]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v22.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #912]
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #968]
-; CHECK-GI-NEXT: mov v23.s[1], w8
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: add v18.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: mov v22.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #992]
-; CHECK-GI-NEXT: fmov s9, w12
-; CHECK-GI-NEXT: mov v23.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #1048]
-; CHECK-GI-NEXT: ldr w12, [sp, #1056]
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v15.4s
-; CHECK-GI-NEXT: sxtb w13, w11
-; CHECK-GI-NEXT: mov v8.s[3], w9
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: ldr w9, [sp, #1000]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w10, [sp, #1016]
-; CHECK-GI-NEXT: ldr w8, [sp, #816]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s3, wzr
+; CHECK-GI-NEXT: mov v24.h[1], w12
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: mov v22.h[5], w11
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[5], w9
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w9, [sp, #192]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: mov v24.h[2], w8
+; CHECK-GI-NEXT: mov v22.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #208]
+; CHECK-GI-NEXT: mov v23.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #216]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #200]
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: mov v24.h[3], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[7], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #288]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v23.h[7], w10
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s27, w12
+; CHECK-GI-NEXT: ldr w10, [sp, #232]
+; CHECK-GI-NEXT: sbfx w16, w14, #8, #8
+; CHECK-GI-NEXT: mov v24.h[4], w15
+; CHECK-GI-NEXT: lsl w15, w11, #8
+; CHECK-GI-NEXT: sbfx w14, w18, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #296]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #344]
+; CHECK-GI-NEXT: mov v27.h[1], w13
+; CHECK-GI-NEXT: lsl w13, w17, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #240]
+; CHECK-GI-NEXT: sbfx w17, w10, #8, #8
+; CHECK-GI-NEXT: mov v25.h[1], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #352]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.h[5], w15
+; CHECK-GI-NEXT: mov v27.h[2], w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #304]
+; CHECK-GI-NEXT: fmov s26, w16
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #248]
+; CHECK-GI-NEXT: mov v25.h[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v24.h[6], w9
+; CHECK-GI-NEXT: lsl w16, w11, #8
+; CHECK-GI-NEXT: mov v26.h[1], w13
+; CHECK-GI-NEXT: mov v27.h[3], w17
+; CHECK-GI-NEXT: sbfx w13, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #312]
+; CHECK-GI-NEXT: ldr w17, [sp, #328]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #256]
+; CHECK-GI-NEXT: ldr w11, [sp, #264]
+; CHECK-GI-NEXT: mov v25.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #368]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v26.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #320]
+; CHECK-GI-NEXT: mov v27.h[4], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #408]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #376]
+; CHECK-GI-NEXT: mov v25.h[4], w9
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: mov v26.h[3], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #416]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w17, #8
+; CHECK-GI-NEXT: mov v27.h[5], w9
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v25.h[5], w14
+; CHECK-GI-NEXT: fmov s29, w15
+; CHECK-GI-NEXT: ldr w14, [sp, #384]
+; CHECK-GI-NEXT: ldr w15, [sp, #472]
+; CHECK-GI-NEXT: mov v26.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #424]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v29.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #480]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v25.h[6], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #432]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v26.h[5], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #392]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v29.h[2], w13
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #336]
+; CHECK-GI-NEXT: ldr w13, [sp, #488]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #440]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v28.h[1], w12
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v29.h[3], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #496]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #400]
+; CHECK-GI-NEXT: mov v26.h[6], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #448]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v28.h[2], w13
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v25.h[7], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #456]
+; CHECK-GI-NEXT: ldr w15, [sp, #504]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v28.h[3], w16
+; CHECK-GI-NEXT: ldr w15, [sp, #512]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mul v30.8h, v22.8h, v25.8h
+; CHECK-GI-NEXT: mov v26.h[7], w9
+; CHECK-GI-NEXT: mov v29.h[5], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #464]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[4], w9
+; CHECK-GI-NEXT: mov v27.h[6], w10
+; CHECK-GI-NEXT: ldr w16, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v30.h[0]
+; CHECK-GI-NEXT: mov v24.h[7], w8
+; CHECK-GI-NEXT: lsl w8, w17, #8
+; CHECK-GI-NEXT: mov v29.h[6], w11
+; CHECK-GI-NEXT: mul v26.8h, v23.8h, v26.8h
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: smov w16, v30.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #528]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[5], w10
+; CHECK-GI-NEXT: mov v27.h[7], w14
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: sbfx w10, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v30.h[4]
+; CHECK-GI-NEXT: mov v29.h[7], w8
+; CHECK-GI-NEXT: smov w15, v26.h[0]
+; CHECK-GI-NEXT: smov w13, v30.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #544]
+; CHECK-GI-NEXT: ldr w11, [sp, #552]
+; CHECK-GI-NEXT: mov v22.s[1], w16
+; CHECK-GI-NEXT: smov w16, v26.h[4]
+; CHECK-GI-NEXT: mov v28.h[6], w10
+; CHECK-GI-NEXT: smov w10, v26.h[1]
+; CHECK-GI-NEXT: fmov s23, w14
+; CHECK-GI-NEXT: smov w14, v26.h[5]
+; CHECK-GI-NEXT: mul v29.8h, v24.8h, v29.8h
+; CHECK-GI-NEXT: fmov s24, w15
+; CHECK-GI-NEXT: smov w15, v26.h[2]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: smov w8, v30.h[5]
+; CHECK-GI-NEXT: smov w17, v30.h[7]
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: mov v22.s[2], w13
+; CHECK-GI-NEXT: smov w13, v30.h[3]
+; CHECK-GI-NEXT: mov v24.s[1], w10
+; CHECK-GI-NEXT: smov w16, v26.h[6]
+; CHECK-GI-NEXT: sbfx w10, w12, #8, #8
+; CHECK-GI-NEXT: smov w18, v29.h[0]
+; CHECK-GI-NEXT: smov w0, v29.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v25.s[1], w14
+; CHECK-GI-NEXT: smov w14, v26.h[7]
+; CHECK-GI-NEXT: mov v28.h[7], w10
+; CHECK-GI-NEXT: mov v22.s[3], w13
+; CHECK-GI-NEXT: smov w13, v26.h[3]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: fmov s26, w18
+; CHECK-GI-NEXT: mov v23.s[1], w8
+; CHECK-GI-NEXT: smov w8, v30.h[6]
+; CHECK-GI-NEXT: mov v25.s[2], w16
+; CHECK-GI-NEXT: lsl w16, w9, #8
+; CHECK-GI-NEXT: mul v31.8h, v27.8h, v28.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #584]
+; CHECK-GI-NEXT: mov v24.s[3], w13
+; CHECK-GI-NEXT: smov w13, v29.h[4]
+; CHECK-GI-NEXT: mov v26.s[1], w0
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.s[2], w8
+; CHECK-GI-NEXT: mov v25.s[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #608]
+; CHECK-GI-NEXT: ldr w8, [sp, #576]
+; CHECK-GI-NEXT: fmov s8, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #616]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s27, w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v26.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[5]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #624]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v8.h[1], w11
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v23.s[3], w17
+; CHECK-GI-NEXT: fmov s9, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: smov w17, v31.h[1]
+; CHECK-GI-NEXT: mov v27.s[1], w15
+; CHECK-GI-NEXT: smov w15, v31.h[0]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v8.h[2], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v9.h[1], w16
+; CHECK-GI-NEXT: smov w16, v31.h[2]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #592]
+; CHECK-GI-NEXT: ldr w12, [sp, #600]
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: smov w15, v29.h[6]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v8.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #640]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v9.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #672]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v28.s[1], w17
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v27.s[2], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #680]
+; CHECK-GI-NEXT: mov v8.h[4], w8
+; CHECK-GI-NEXT: smov w8, v31.h[4]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v9.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #688]
+; CHECK-GI-NEXT: mov v28.s[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #648]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #656]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s30, w8
+; CHECK-GI-NEXT: sbfx w8, w10, #8, #8
+; CHECK-GI-NEXT: smov w10, v31.h[5]
+; CHECK-GI-NEXT: mov v8.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #696]
+; CHECK-GI-NEXT: mov v10.h[1], w15
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v9.h[4], w8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #704]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: ldr w17, [sp, #768]
+; CHECK-GI-NEXT: mov v30.s[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #744]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: mov v10.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v9.h[5], w16
+; CHECK-GI-NEXT: mov v8.h[6], w11
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #720]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v10.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #752]
+; CHECK-GI-NEXT: mov v8.h[7], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #760]
+; CHECK-GI-NEXT: ldr w8, [sp, #784]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mla v0.4s, v10.4s, v29.4s
-; CHECK-GI-NEXT: fmov s10, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #1024]
-; CHECK-GI-NEXT: mul v20.4s, v11.4s, v8.4s
-; CHECK-GI-NEXT: ldr q8, [sp] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v9.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #1008]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: mov v10.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1064]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v10.h[4], w12
+; CHECK-GI-NEXT: sbfx w12, w15, #8, #8
+; CHECK-GI-NEXT: mov v11.h[1], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #776]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v9.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w11, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #808]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v11.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #816]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], wzr
-; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v9.h[7], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #824]
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: mov v10.h[5], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #832]
+; CHECK-GI-NEXT: mov v11.h[3], w13
+; CHECK-GI-NEXT: sbfx w15, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w17, #8
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #872]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #840]
+; CHECK-GI-NEXT: sbfx w13, w18, #8, #8
+; CHECK-GI-NEXT: sbfx w17, w12, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #856]
+; CHECK-GI-NEXT: mov v12.h[1], w15
+; CHECK-GI-NEXT: mov v11.h[4], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #880]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v10.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #848]
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: sbfx w15, w16, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #888]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.s[1], wzr
+; CHECK-GI-NEXT: mov v12.h[2], w17
+; CHECK-GI-NEXT: lsl w17, w10, #8
+; CHECK-GI-NEXT: mov v11.h[5], w9
+; CHECK-GI-NEXT: fmov s13, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #936]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #864]
+; CHECK-GI-NEXT: mov v12.h[3], w15
+; CHECK-GI-NEXT: mov v11.h[6], w8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #1000]
+; CHECK-GI-NEXT: mov v13.h[1], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #944]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #896]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v12.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1008]
+; CHECK-GI-NEXT: fmov s14, w9
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v13.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #952]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w17, w8, #8
+; CHECK-GI-NEXT: smov w8, v29.h[3]
+; CHECK-GI-NEXT: smov w9, v29.h[7]
+; CHECK-GI-NEXT: fmov s29, w11
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v14.h[1], w15
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #904]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v12.h[5], w13
+; CHECK-GI-NEXT: mov v13.h[3], w14
+; CHECK-GI-NEXT: mov v29.h[1], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #960]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #1016]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #1024]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v14.h[2], w16
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #912]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #968]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #976]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v14.h[3], w15
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v29.h[2], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #1032]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #920]
+; CHECK-GI-NEXT: mov v26.s[3], w8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v14.h[4], w11
+; CHECK-GI-NEXT: mov v29.h[3], w13
+; CHECK-GI-NEXT: ldr w11, [sp, #984]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v13.h[5], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #1040]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #928]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[7], w10
+; CHECK-GI-NEXT: mov v27.s[3], w9
+; CHECK-GI-NEXT: mov v14.h[5], w12
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[6], w14
+; CHECK-GI-NEXT: ldr w12, [sp, #1048]
+; CHECK-GI-NEXT: sbfx w14, w16, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #728]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mul v15.8h, v8.8h, v12.8h
+; CHECK-GI-NEXT: smov w16, v31.h[6]
+; CHECK-GI-NEXT: mov v14.h[6], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #992]
+; CHECK-GI-NEXT: mov v29.h[5], w14
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v13.h[7], w13
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #792]
+; CHECK-GI-NEXT: ldr w14, [sp, #1056]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v30.s[2], w16
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: smov w8, v15.h[1]
+; CHECK-GI-NEXT: smov w9, v15.h[5]
+; CHECK-GI-NEXT: mov v29.h[6], w12
+; CHECK-GI-NEXT: lsl w12, w13, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v10.h[7], w11
+; CHECK-GI-NEXT: mov v14.h[7], w10
+; CHECK-GI-NEXT: mul v12.8h, v9.8h, v13.8h
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w10, v15.h[0]
+; CHECK-GI-NEXT: smov w11, v15.h[4]
+; CHECK-GI-NEXT: smov w14, v31.h[7]
+; CHECK-GI-NEXT: smov w15, v31.h[3]
+; CHECK-GI-NEXT: mov v11.h[7], w12
+; CHECK-GI-NEXT: mov v29.h[7], w13
; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mul v13.8h, v10.8h, v14.8h
+; CHECK-GI-NEXT: smov w12, v12.h[0]
+; CHECK-GI-NEXT: smov w13, v12.h[1]
+; CHECK-GI-NEXT: mov v7.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1080]
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w10, [sp, #1032]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: mov v29.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #1072]
-; CHECK-GI-NEXT: mov v19.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: fmov s31, w11
+; CHECK-GI-NEXT: mov v30.s[3], w14
+; CHECK-GI-NEXT: smov w11, v12.h[4]
+; CHECK-GI-NEXT: mul v14.8h, v11.8h, v29.8h
+; CHECK-GI-NEXT: fmov s29, w10
+; CHECK-GI-NEXT: smov w10, v15.h[2]
+; CHECK-GI-NEXT: smov w14, v13.h[0]
+; CHECK-GI-NEXT: fmov s8, w12
+; CHECK-GI-NEXT: smov w16, v13.h[1]
+; CHECK-GI-NEXT: mov v31.s[1], w9
+; CHECK-GI-NEXT: smov w9, v12.h[2]
+; CHECK-GI-NEXT: mov v28.s[3], w15
+; CHECK-GI-NEXT: mov v29.s[1], w8
+; CHECK-GI-NEXT: smov w8, v15.h[6]
+; CHECK-GI-NEXT: smov w15, v12.h[5]
+; CHECK-GI-NEXT: mov v8.s[1], w13
+; CHECK-GI-NEXT: fmov s9, w11
+; CHECK-GI-NEXT: smov w12, v15.h[3]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: smov w14, v13.h[2]
+; CHECK-GI-NEXT: smov w11, v12.h[6]
+; CHECK-GI-NEXT: smov w13, v15.h[7]
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v31.s[2], w8
+; CHECK-GI-NEXT: smov w8, v13.h[4]
+; CHECK-GI-NEXT: mov v29.s[2], w10
+; CHECK-GI-NEXT: mov v10.s[1], w16
+; CHECK-GI-NEXT: smov w16, v14.h[0]
+; CHECK-GI-NEXT: mov v8.s[2], w9
+; CHECK-GI-NEXT: smov w9, v13.h[5]
+; CHECK-GI-NEXT: smov w10, v12.h[3]
+; CHECK-GI-NEXT: mov v9.s[1], w15
+; CHECK-GI-NEXT: smov w15, v13.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mul w8, w8, w9
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: fmov s11, w8
+; CHECK-GI-NEXT: smov w8, v14.h[1]
+; CHECK-GI-NEXT: mov v29.s[3], w12
+; CHECK-GI-NEXT: mov v10.s[2], w14
+; CHECK-GI-NEXT: smov w14, v12.h[7]
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: smov w12, v14.h[4]
+; CHECK-GI-NEXT: mov v8.s[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #536]
+; CHECK-GI-NEXT: mov v11.s[1], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v9.s[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #800]
+; CHECK-GI-NEXT: mov v12.s[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1064]
+; CHECK-GI-NEXT: mov v31.s[3], w13
+; CHECK-GI-NEXT: smov w13, v14.h[5]
+; CHECK-GI-NEXT: sxtb w9, w9
; CHECK-GI-NEXT: sxtb w10, w10
; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v11.s[2], w15
+; CHECK-GI-NEXT: smov w15, v13.h[3]
+; CHECK-GI-NEXT: smov w16, v13.h[7]
+; CHECK-GI-NEXT: fmov s13, w12
+; CHECK-GI-NEXT: mul w9, w9, w10
+; CHECK-GI-NEXT: smov w12, v14.h[2]
+; CHECK-GI-NEXT: mul w8, w11, w8
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: mov v21.s[2], wzr
; CHECK-GI-NEXT: mov v16.s[2], wzr
+; CHECK-GI-NEXT: mov v18.s[2], wzr
; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v13.s[1], w13
+; CHECK-GI-NEXT: smov w13, v14.h[6]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: sxth w10, w8
+; CHECK-GI-NEXT: mov v20.s[2], wzr
; CHECK-GI-NEXT: mov v6.s[2], wzr
+; CHECK-GI-NEXT: mov v7.s[2], wzr
; CHECK-GI-NEXT: mov v2.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], wzr
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: mov v10.s[3], w11
-; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
-; CHECK-GI-NEXT: ldr w9, [sp, #976]
-; CHECK-GI-NEXT: fmov s21, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #1040]
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: add v22.4s, v22.4s, v23.4s
+; CHECK-GI-NEXT: add v25.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: fmov s23, w9
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v12.s[2], w12
+; CHECK-GI-NEXT: mov v13.s[2], w13
+; CHECK-GI-NEXT: smov w8, v14.h[3]
+; CHECK-GI-NEXT: smov w9, v14.h[7]
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v0.s[2], wzr
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[3], wzr
; CHECK-GI-NEXT: mov v16.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[3], wzr
; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.16b, v8.16b
-; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v20.s[3], wzr
; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v7.s[3], wzr
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], wzr
-; CHECK-GI-NEXT: mov v25.s[1], wzr
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mul v8.4s, v13.4s, v9.4s
-; CHECK-GI-NEXT: mul v9.4s, v14.4s, v10.4s
-; CHECK-GI-NEXT: mov v23.s[3], w9
-; CHECK-GI-NEXT: mov v29.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: mov v23.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], wzr
+; CHECK-GI-NEXT: mov v9.s[3], w14
+; CHECK-GI-NEXT: mov v10.s[3], w15
+; CHECK-GI-NEXT: mov v11.s[3], w16
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: add v16.4s, v16.4s, v17.4s
-; CHECK-GI-NEXT: add v3.4s, v3.4s, v6.4s
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
-; CHECK-GI-NEXT: add v4.4s, v4.4s, v7.4s
-; CHECK-GI-NEXT: mov v25.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mla v20.4s, v28.4s, v22.4s
-; CHECK-GI-NEXT: mla v8.4s, v31.4s, v23.4s
-; CHECK-GI-NEXT: mla v9.4s, v12.4s, v29.4s
-; CHECK-GI-NEXT: add v5.4s, v19.4s, v16.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v18.4s
-; CHECK-GI-NEXT: add v3.4s, v11.4s, v3.4s
+; CHECK-GI-NEXT: mov v12.s[3], w8
+; CHECK-GI-NEXT: mov v13.s[3], w9
+; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
+; CHECK-GI-NEXT: add v16.4s, v16.4s, v18.4s
+; CHECK-GI-NEXT: add v17.4s, v17.4s, v20.4s
+; CHECK-GI-NEXT: add v6.4s, v6.4s, v7.4s
; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v4.4s, v27.4s, v30.4s
-; CHECK-GI-NEXT: add v6.4s, v24.4s, v26.4s
-; CHECK-GI-NEXT: ldr x29, [sp, #80] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v25.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v20.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v5.4s
-; CHECK-GI-NEXT: add v5.4s, v8.4s, v9.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v2.4s
-; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v25.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: add v2.4s, v21.4s, v2.4s
-; CHECK-GI-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v3.4s, v5.4s
+; CHECK-GI-NEXT: mov v23.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], wzr
+; CHECK-GI-NEXT: add v26.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v27.4s, v28.4s, v30.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v19.4s
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
+; CHECK-GI-NEXT: add v5.4s, v29.4s, v31.4s
+; CHECK-GI-NEXT: add v7.4s, v8.4s, v9.4s
+; CHECK-GI-NEXT: add v16.4s, v10.4s, v11.4s
+; CHECK-GI-NEXT: add v17.4s, v12.4s, v13.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v6.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v23.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], wzr
+; CHECK-GI-NEXT: add v3.4s, v22.4s, v25.4s
+; CHECK-GI-NEXT: add v6.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-GI-NEXT: add v4.4s, v5.4s, v7.4s
+; CHECK-GI-NEXT: add v5.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v2.4s, v3.4s, v6.4s
+; CHECK-GI-NEXT: add v1.4s, v23.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v0.4s, v24.4s, v0.4s
+; CHECK-GI-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: add sp, sp, #96
+; CHECK-GI-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <33 x i8> %a to <33 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-extmul.ll b/llvm/test/CodeGen/AArch64/neon-extmul.ll
index c82f8e1..84b634d 100644
--- a/llvm/test/CodeGen/AArch64/neon-extmul.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extmul.ll
@@ -12,10 +12,9 @@ define <8 x i32> @extmuls_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: smull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -34,10 +33,9 @@ define <8 x i32> @extmulu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -79,12 +77,9 @@ define <8 x i32> @extmuladds_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: smlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: saddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: saddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -104,12 +99,9 @@ define <8 x i32> @extmuladdu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -163,16 +155,13 @@ define <8 x i64> @extmuls_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: smull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: smull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: smull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: smull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: sshll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: sshll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: sshll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: sshll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -195,16 +184,13 @@ define <8 x i64> @extmulu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: umull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
@@ -263,20 +249,13 @@ define <8 x i64> @extmuladds_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: smlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: smlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: smlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: smlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: saddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: saddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: saddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: saddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -301,20 +280,13 @@ define <8 x i64> @extmuladdu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index 17ad298..3caac1d 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -1,40 +1,72 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
%struct.anon = type { ptr, ptr }
@ptr_wrapper = common global ptr null, align 8
define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
-; CHECK-LABEL: test_func_i32_two_uses:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB0_3
-; CHECK-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: lsl w1, w1, #1
-; CHECK-NEXT: cbz w1, .LBB0_6
-; CHECK-NEXT: .LBB0_3: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands w10, w1, w0
-; CHECK-NEXT: and w11, w2, w0
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cmp w10, w11
-; CHECK-NEXT: b.eq .LBB0_1
-; CHECK-NEXT: // %bb.4: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbnz w2, .LBB0_1
-; CHECK-NEXT: // %bb.5: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbz w10, .LBB0_2
-; CHECK-NEXT: b .LBB0_1
-; CHECK-NEXT: .LBB0_6: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i32_two_uses:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB0_3
+; CHECK-SD-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: lsl w1, w1, #1
+; CHECK-SD-NEXT: cbz w1, .LBB0_6
+; CHECK-SD-NEXT: .LBB0_3: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands w10, w1, w0
+; CHECK-SD-NEXT: and w11, w2, w0
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cmp w10, w11
+; CHECK-SD-NEXT: b.eq .LBB0_1
+; CHECK-SD-NEXT: // %bb.4: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbnz w2, .LBB0_1
+; CHECK-SD-NEXT: // %bb.5: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbz w10, .LBB0_2
+; CHECK-SD-NEXT: b .LBB0_1
+; CHECK-SD-NEXT: .LBB0_6: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i32_two_uses:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB0_3
+; CHECK-GI-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: lsl w1, w1, #1
+; CHECK-GI-NEXT: cbz w1, .LBB0_6
+; CHECK-GI-NEXT: .LBB0_3: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and w10, w1, w0
+; CHECK-GI-NEXT: tst w1, w0
+; CHECK-GI-NEXT: and w11, w2, w0
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cmp w10, w11
+; CHECK-GI-NEXT: b.eq .LBB0_1
+; CHECK-GI-NEXT: // %bb.4: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbnz w2, .LBB0_1
+; CHECK-GI-NEXT: // %bb.5: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbz w10, .LBB0_2
+; CHECK-GI-NEXT: b .LBB0_1
+; CHECK-GI-NEXT: .LBB0_6: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -70,28 +102,52 @@ do.end: ; preds = %4
}
define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) {
-; CHECK-LABEL: test_func_i64_one_use:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: lsl x1, x1, #1
-; CHECK-NEXT: cbz x1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands x10, x1, x0
-; CHECK-NEXT: orr x10, x2, x10
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cbz x10, .LBB1_1
-; CHECK-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: b .LBB1_1
-; CHECK-NEXT: .LBB1_4: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i64_one_use:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB1_2
+; CHECK-SD-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: lsl x1, x1, #1
+; CHECK-SD-NEXT: cbz x1, .LBB1_4
+; CHECK-SD-NEXT: .LBB1_2: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands x10, x1, x0
+; CHECK-SD-NEXT: orr x10, x2, x10
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cbz x10, .LBB1_1
+; CHECK-SD-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: b .LBB1_1
+; CHECK-SD-NEXT: .LBB1_4: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i64_one_use:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB1_2
+; CHECK-GI-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: lsl x1, x1, #1
+; CHECK-GI-NEXT: cbz x1, .LBB1_4
+; CHECK-GI-NEXT: .LBB1_2: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and x10, x1, x0
+; CHECK-GI-NEXT: tst x1, x0
+; CHECK-GI-NEXT: orr x10, x2, x10
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cbz x10, .LBB1_1
+; CHECK-GI-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: b .LBB1_1
+; CHECK-GI-NEXT: .LBB1_4: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -124,11 +180,18 @@ do.end: ; preds = %4
}
define i64 @test_and1(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ands x8, x0, #0x3
-; CHECK-NEXT: csel x0, x8, x1, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ands x8, x0, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and x8, x0, #0x3
+; CHECK-GI-NEXT: tst x0, #0x3
+; CHECK-GI-NEXT: csel x0, x8, x1, eq
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%c = icmp eq i64 %a, 0
%s = select i1 %c, i64 %a, i64 %y
@@ -148,23 +211,43 @@ define i64 @test_and2(i64 %x, i64 %y) {
}
define i64 @test_and3(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and3:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: mov x0, xzr
-; CHECK-NEXT: mov x19, x1
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x20, #0x3
-; CHECK-NEXT: csel x0, x8, x19, eq
-; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: mov x20, x0
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: mov x19, x1
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x20, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x19, eq
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: and x21, x0, #0x3
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: mov x20, x1
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x21, x20, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 0)
%c = icmp eq i64 %a, 0
@@ -173,19 +256,37 @@ define i64 @test_and3(i64 %x, i64 %y) {
}
define i64 @test_and_4(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and_4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: ands x0, x0, #0x3
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x19, #0x3
-; CHECK-NEXT: csel x0, x8, x0, eq
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and_4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: mov x19, x0
+; CHECK-SD-NEXT: ands x0, x0, #0x3
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x19, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x0, eq
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and_4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: and x20, x0, #0x3
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x0, x20
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x20, x0, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 %a)
%c = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
index 2a77d4d..4206c0bc 100644
--- a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
+++ b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll
@@ -27,11 +27,12 @@ define i32 @caller() nounwind ssp {
; CHECK-NEXT: sub sp, sp, #208
; CHECK-NEXT: mov w8, #10 ; =0xa
; CHECK-NEXT: mov w9, #9 ; =0x9
-; CHECK-NEXT: mov w10, #8 ; =0x8
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: stp x9, x8, [sp, #24]
-; CHECK-NEXT: mov w8, #7 ; =0x7
+; CHECK-NEXT: mov w8, #8 ; =0x8
; CHECK-NEXT: mov w9, #6 ; =0x6
-; CHECK-NEXT: mov w0, #1 ; =0x1
+; CHECK-NEXT: str x8, [sp, #16]
+; CHECK-NEXT: mov w8, #7 ; =0x7
; CHECK-NEXT: mov w1, #2 ; =0x2
; CHECK-NEXT: mov w2, #3 ; =0x3
; CHECK-NEXT: mov w3, #4 ; =0x4
@@ -46,8 +47,7 @@ define i32 @caller() nounwind ssp {
; CHECK-NEXT: stp x22, x21, [sp, #160] ; 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #176] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #192] ; 16-byte Folded Spill
-; CHECK-NEXT: stp x8, x10, [sp, #8]
-; CHECK-NEXT: str x9, [sp]
+; CHECK-NEXT: stp x9, x8, [sp]
; CHECK-NEXT: bl _callee
; CHECK-NEXT: ldp x29, x30, [sp, #192] ; 16-byte Folded Reload
; CHECK-NEXT: ldp x20, x19, [sp, #176] ; 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/reassocmls.ll b/llvm/test/CodeGen/AArch64/reassocmls.ll
index acbf9fc..0909fbf 100644
--- a/llvm/test/CodeGen/AArch64/reassocmls.ll
+++ b/llvm/test/CodeGen/AArch64/reassocmls.ll
@@ -1,12 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+sve2 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -mattr=+sve2 -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+; CHECK-GI: warning: Instruction selection used fallback path for smlsl_nxv8i16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for umlsl_nxv8i16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mls_nxv8i16
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mla_nxv8i16
define i64 @smlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-; CHECK-LABEL: smlsl_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: smsubl x8, w4, w3, x0
-; CHECK-NEXT: smsubl x0, w2, w1, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: smlsl_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: smsubl x8, w4, w3, x0
+; CHECK-SD-NEXT: smsubl x0, w2, w1, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: smlsl_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: smull x8, w2, w1
+; CHECK-GI-NEXT: smaddl x8, w4, w3, x8
+; CHECK-GI-NEXT: sub x0, x0, x8
+; CHECK-GI-NEXT: ret
%be = sext i32 %b to i64
%ce = sext i32 %c to i64
%de = sext i32 %d to i64
@@ -19,11 +32,18 @@ define i64 @smlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
}
define i64 @umlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-; CHECK-LABEL: umlsl_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: umsubl x8, w4, w3, x0
-; CHECK-NEXT: umsubl x0, w2, w1, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlsl_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: umsubl x8, w4, w3, x0
+; CHECK-SD-NEXT: umsubl x0, w2, w1, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlsl_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: umull x8, w2, w1
+; CHECK-GI-NEXT: umaddl x8, w4, w3, x8
+; CHECK-GI-NEXT: sub x0, x0, x8
+; CHECK-GI-NEXT: ret
%be = zext i32 %b to i64
%ce = zext i32 %c to i64
%de = zext i32 %d to i64
@@ -36,11 +56,18 @@ define i64 @umlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
}
define i64 @mls_i64(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) {
-; CHECK-LABEL: mls_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: msub x8, x4, x3, x0
-; CHECK-NEXT: msub x0, x2, x1, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mls_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: msub x8, x4, x3, x0
+; CHECK-SD-NEXT: msub x0, x2, x1, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mls_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mul x8, x2, x1
+; CHECK-GI-NEXT: madd x8, x4, x3, x8
+; CHECK-GI-NEXT: sub x0, x0, x8
+; CHECK-GI-NEXT: ret
%m1.neg = mul i64 %c, %b
%m2.neg = mul i64 %e, %d
%reass.add = add i64 %m2.neg, %m1.neg
@@ -49,11 +76,18 @@ define i64 @mls_i64(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) {
}
define i16 @mls_i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e) {
-; CHECK-LABEL: mls_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: msub w8, w4, w3, w0
-; CHECK-NEXT: msub w0, w2, w1, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mls_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: msub w8, w4, w3, w0
+; CHECK-SD-NEXT: msub w0, w2, w1, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mls_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mul w8, w2, w1
+; CHECK-GI-NEXT: madd w8, w4, w3, w8
+; CHECK-GI-NEXT: sub w0, w0, w8
+; CHECK-GI-NEXT: ret
%m1.neg = mul i16 %c, %b
%m2.neg = mul i16 %e, %d
%reass.add = add i16 %m2.neg, %m1.neg
@@ -91,12 +125,20 @@ define i64 @mls_i64_C(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) {
}
define i64 @umlsl_i64_muls(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-; CHECK-LABEL: umlsl_i64_muls:
-; CHECK: // %bb.0:
-; CHECK-NEXT: umull x8, w2, w3
-; CHECK-NEXT: umsubl x8, w4, w3, x8
-; CHECK-NEXT: umsubl x0, w2, w1, x8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlsl_i64_muls:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: umull x8, w2, w3
+; CHECK-SD-NEXT: umsubl x8, w4, w3, x8
+; CHECK-SD-NEXT: umsubl x0, w2, w1, x8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlsl_i64_muls:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: umull x8, w2, w1
+; CHECK-GI-NEXT: umull x9, w2, w3
+; CHECK-GI-NEXT: umaddl x8, w4, w3, x8
+; CHECK-GI-NEXT: sub x0, x9, x8
+; CHECK-GI-NEXT: ret
%be = zext i32 %b to i64
%ce = zext i32 %c to i64
%de = zext i32 %d to i64
@@ -110,13 +152,21 @@ define i64 @umlsl_i64_muls(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
}
define i64 @umlsl_i64_uses(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-; CHECK-LABEL: umlsl_i64_uses:
-; CHECK: // %bb.0:
-; CHECK-NEXT: umull x8, w4, w3
-; CHECK-NEXT: umaddl x8, w2, w1, x8
-; CHECK-NEXT: sub x9, x0, x8
-; CHECK-NEXT: and x0, x8, x9
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlsl_i64_uses:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: umull x8, w4, w3
+; CHECK-SD-NEXT: umaddl x8, w2, w1, x8
+; CHECK-SD-NEXT: sub x9, x0, x8
+; CHECK-SD-NEXT: and x0, x8, x9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlsl_i64_uses:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: umull x8, w2, w1
+; CHECK-GI-NEXT: umaddl x8, w4, w3, x8
+; CHECK-GI-NEXT: sub x9, x0, x8
+; CHECK-GI-NEXT: and x0, x8, x9
+; CHECK-GI-NEXT: ret
%be = zext i32 %b to i64
%ce = zext i32 %c to i64
%de = zext i32 %d to i64
@@ -175,11 +225,18 @@ define i64 @mla_i64_mul(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) {
define <8 x i16> @smlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d, <8 x i8> %e) {
-; CHECK-LABEL: smlsl_v8i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: smlsl v0.8h, v4.8b, v3.8b
-; CHECK-NEXT: smlsl v0.8h, v2.8b, v1.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: smlsl_v8i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: smlsl v0.8h, v4.8b, v3.8b
+; CHECK-SD-NEXT: smlsl v0.8h, v2.8b, v1.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: smlsl_v8i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: smull v1.8h, v2.8b, v1.8b
+; CHECK-GI-NEXT: smlal v1.8h, v4.8b, v3.8b
+; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%be = sext <8 x i8> %b to <8 x i16>
%ce = sext <8 x i8> %c to <8 x i16>
%de = sext <8 x i8> %d to <8 x i16>
@@ -192,11 +249,18 @@ define <8 x i16> @smlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %
}
define <8 x i16> @umlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d, <8 x i8> %e) {
-; CHECK-LABEL: umlsl_v8i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: umlsl v0.8h, v4.8b, v3.8b
-; CHECK-NEXT: umlsl v0.8h, v2.8b, v1.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: umlsl_v8i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: umlsl v0.8h, v4.8b, v3.8b
+; CHECK-SD-NEXT: umlsl v0.8h, v2.8b, v1.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: umlsl_v8i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: umull v1.8h, v2.8b, v1.8b
+; CHECK-GI-NEXT: umlal v1.8h, v4.8b, v3.8b
+; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%be = zext <8 x i8> %b to <8 x i16>
%ce = zext <8 x i8> %c to <8 x i16>
%de = zext <8 x i8> %d to <8 x i16>
@@ -209,11 +273,18 @@ define <8 x i16> @umlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %
}
define <8 x i16> @mls_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) {
-; CHECK-LABEL: mls_v8i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mls v0.8h, v4.8h, v3.8h
-; CHECK-NEXT: mls v0.8h, v2.8h, v1.8h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mls_v8i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mls v0.8h, v4.8h, v3.8h
+; CHECK-SD-NEXT: mls v0.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mls_v8i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mul v1.8h, v2.8h, v1.8h
+; CHECK-GI-NEXT: mla v1.8h, v4.8h, v3.8h
+; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT: ret
%m1.neg = mul <8 x i16> %c, %b
%m2.neg = mul <8 x i16> %e, %d
%reass.add = add <8 x i16> %m2.neg, %m1.neg
@@ -236,12 +307,20 @@ define <8 x i16> @mla_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16>
}
define <8 x i16> @mls_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) {
-; CHECK-LABEL: mls_v8i16_C:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v0.8h, #10
-; CHECK-NEXT: mls v0.8h, v4.8h, v3.8h
-; CHECK-NEXT: mls v0.8h, v2.8h, v1.8h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mls_v8i16_C:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.8h, #10
+; CHECK-SD-NEXT: mls v0.8h, v4.8h, v3.8h
+; CHECK-SD-NEXT: mls v0.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mls_v8i16_C:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v1.8h
+; CHECK-GI-NEXT: movi v1.8h, #10
+; CHECK-GI-NEXT: mla v0.8h, v4.8h, v3.8h
+; CHECK-GI-NEXT: sub v0.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT: ret
%m1.neg = mul <8 x i16> %c, %b
%m2.neg = mul <8 x i16> %e, %d
%reass.add = add <8 x i16> %m2.neg, %m1.neg
@@ -250,13 +329,21 @@ define <8 x i16> @mls_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16
}
define <8 x i16> @mla_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) {
-; CHECK-LABEL: mla_v8i16_C:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mul v1.8h, v2.8h, v1.8h
-; CHECK-NEXT: movi v0.8h, #10
-; CHECK-NEXT: mla v1.8h, v4.8h, v3.8h
-; CHECK-NEXT: add v0.8h, v1.8h, v0.8h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mla_v8i16_C:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mul v1.8h, v2.8h, v1.8h
+; CHECK-SD-NEXT: movi v0.8h, #10
+; CHECK-SD-NEXT: mla v1.8h, v4.8h, v3.8h
+; CHECK-SD-NEXT: add v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mla_v8i16_C:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v1.8h
+; CHECK-GI-NEXT: movi v1.8h, #10
+; CHECK-GI-NEXT: mla v0.8h, v4.8h, v3.8h
+; CHECK-GI-NEXT: add v0.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT: ret
%m1.neg = mul <8 x i16> %c, %b
%m2.neg = mul <8 x i16> %e, %d
%reass.add = add <8 x i16> %m2.neg, %m1.neg
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir
new file mode 100644
index 0000000..aecb90a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir
@@ -0,0 +1,23 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=false -o - %s | FileCheck %s
+# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=true -o - %s | FileCheck %s
+---
+name: test
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x0 = COPY $x1
+ ; CHECK-NEXT: renamable $w1 = ORRWrr $wzr, renamable $w0, implicit-def renamable $x1
+ ; CHECK-NEXT: RET_ReallyLR implicit $x1, implicit $x0
+ %190:gpr64 = COPY killed $x1
+ %191:gpr32 = COPY %190.sub_32:gpr64
+ %192:gpr32 = ORRWrr $wzr, killed %191:gpr32
+ %193:gpr64all = SUBREG_TO_REG 0, killed %192:gpr32, %subreg.sub_32
+ $x0 = COPY killed %190:gpr64
+ $x1 = COPY killed %193:gpr64all
+ RET_ReallyLR implicit $x1, implicit $x0
+...
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
index 08fc47d..eb6242c 100644
--- a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
+++ b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
@@ -7,9 +7,18 @@
# CHECK-DBG: ********** JOINING INTERVALS ***********
# CHECK-DBG: ********** INTERVALS **********
# CHECK-DBG: %0 [16r,32r:0) 0@16r weight:0.000000e+00
-# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00
-# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [112e,112d:0) 0@112e L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00
+# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000080 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00
+# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00
# CHECK-DBG: %5 [32r,112r:1)[112r,112d:0) 0@112r 1@32r weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0.entry:
+# CHECK-DBG: 16B %0:gpr64sp = ADDXri %stack.0, 0, 0
+# CHECK-DBG: 32B %5:gpr64common = nuw ADDXri %0:gpr64sp, 64, 0
+# CHECK-DBG: 48B undef %3.sub_32:gpr64 = MOVi32imm 64, implicit-def %3:gpr64
+# CHECK-DBG: 80B undef %4.sub_32:gpr64 = MOVi32imm 64, implicit-def %4:gpr64
+# CHECK-DBG: 112B dead %5:gpr64common, dead early-clobber %4:gpr64 = MOPSMemorySetPseudo %5:gpr64common(tied-def 0), %4:gpr64(tied-def 1), %3:gpr64, implicit-def dead $nzcv
+# CHECK-DBG: 128B RET_ReallyLR
+
---
name: test
tracksRegLiveness: true
@@ -43,9 +52,44 @@ body: |
# CHECK-DBG: %1 [32r,48B:2)[48B,320r:0)[320r,368B:1) 0@48B-phi 1@320r 2@32r
# CHECK-DBG-SAME: weight:0.000000e+00
# CHECK-DBG: %3 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
-# CHECK-DBG-SAME: L0000000000000080 [288r,304B:0)[304B,320r:3) 0@288r 1@x 2@x 3@304B-phi
+# CHECK-DBG-SAME: L0000000000000080 [240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@x 3@304B-phi
# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 48B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0, %bb.7
+# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%)
+# CHECK-DBG: 64B bb.2:
+# CHECK-DBG: ; predecessors: %bb.1
+# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%)
+# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 96B bb.3:
+# CHECK-DBG: ; predecessors: %bb.2
+# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%)
+# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv
+# CHECK-DBG: 160B bb.4:
+# CHECK-DBG: ; predecessors: %bb.3
+# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%)
+# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv
+# CHECK-DBG: 208B bb.5:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 240B undef %3.sub_32:gpr64 = MOVi32imm 1, implicit-def %3:gpr64
+# CHECK-DBG: 256B B %bb.7
+# CHECK-DBG: 272B bb.6:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 288B %3:gpr64 = COPY $xzr
+# CHECK-DBG: 304B bb.7:
+# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 320B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1
+# CHECK-DBG: 352B B %bb.1
---
name: reproducer
tracksRegLiveness: true
@@ -92,6 +136,42 @@ body: |
# CHECK-DBG-SAME: L0000000000000080 [224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@x 3@288B-phi
# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi
# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 48B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0, %bb.7
+# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%)
+# CHECK-DBG: 64B bb.2:
+# CHECK-DBG: ; predecessors: %bb.1
+# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%)
+# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 96B bb.3:
+# CHECK-DBG: ; predecessors: %bb.2
+# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%)
+# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv
+# CHECK-DBG: 160B bb.4:
+# CHECK-DBG: ; predecessors: %bb.3
+# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%)
+# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF
+# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv
+# CHECK-DBG: 208B bb.5:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 224B %3:gpr64 = IMPLICIT_DEF
+# CHECK-DBG: 240B B %bb.7
+# CHECK-DBG: 256B bb.6:
+# CHECK-DBG: ; predecessors: %bb.4
+# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%)
+# CHECK-DBG: 272B %3:gpr64 = COPY $xzr
+# CHECK-DBG: 288B bb.7:
+# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6
+# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%)
+# CHECK-DBG: 304B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1
+# CHECK-DBG: 336B B %bb.1
+
---
name: reproducer2
tracksRegLiveness: true
@@ -127,3 +207,78 @@ body: |
B %bb.1
...
+# CHECK-DBG: ********** REGISTER COALESCER **********
+# CHECK-DBG: ********** Function: reproducer3
+# CHECK-DBG: ********** JOINING INTERVALS ***********
+# CHECK-DBG: ********** INTERVALS **********
+# CHECK-DBG: W0 [0B,32r:0)[320r,336r:1) 0@0B-phi 1@320r
+# CHECK-DBG: W1 [0B,16r:0) 0@0B-phi
+# CHECK-DBG: %0 [16r,64r:0) 0@16r weight:0.000000e+00
+# CHECK-DBG: %1 [32r,128r:0) 0@32r weight:0.000000e+00
+# CHECK-DBG: %2 [48r,64r:0) 0@48r weight:0.000000e+00
+# CHECK-DBG: %3 [64r,80r:0) 0@64r weight:0.000000e+00
+# CHECK-DBG: %4 [80r,176r:0) 0@80r weight:0.000000e+00
+# CHECK-DBG: %7 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r
+# CHECK-DBG-SAME: L0000000000000080 [128r,256r:0)[304B,320r:0) 0@128r
+# CHECK-DBG-SAME: L0000000000000040 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r
+# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: %8 [96r,176r:1)[176r,192r:0) 0@176r 1@96r weight:0.000000e+00
+# CHECK-DBG: %9 [256r,272r:0) 0@256r weight:0.000000e+00
+# CHECK-DBG: ********** MACHINEINSTRS **********
+# CHECK-DBG: 0B bb.0:
+# CHECK-DBG: successors: %bb.2(0x40000000), %bb.1(0x40000000); %bb.2(50.00%), %bb.1(50.00%)
+# CHECK-DBG: liveins: $w0, $w1
+# CHECK-DBG: 16B %0:gpr32 = COPY $w1
+# CHECK-DBG: 32B %1:gpr32 = COPY $w0
+# CHECK-DBG: 48B %2:gpr32 = UBFMWri %1:gpr32, 31, 30
+# CHECK-DBG: 64B %3:gpr32 = SUBWrs %2:gpr32, %0:gpr32, 1
+# CHECK-DBG: 80B %4:gpr32 = UBFMWri %3:gpr32, 1, 31
+# CHECK-DBG: 96B %8:gpr32common = MOVi32imm 1
+# CHECK-DBG: 112B undef %7.sub_32:gpr64 = MOVi32imm 1
+# CHECK-DBG: 128B undef %7.sub_32:gpr64 = BFMWri %7.sub_32:gpr64(tied-def 0), %1:gpr32, 31, 30, implicit-def %7:gpr64
+# CHECK-DBG: 176B %8:gpr32common = BFMWri %8:gpr32common(tied-def 0), %4:gpr32, 30, 29
+# CHECK-DBG: 192B dead $wzr = SUBSWri %8:gpr32common, 0, 0, implicit-def $nzcv
+# CHECK-DBG: 208B Bcc 2, %bb.2, implicit killed $nzcv
+# CHECK-DBG: 224B B %bb.1
+# CHECK-DBG: 240B bb.1:
+# CHECK-DBG: ; predecessors: %bb.0
+# CHECK-DBG: 256B %9:gpr64common = UBFMXri %7:gpr64, 62, 61
+# CHECK-DBG: 272B dead $xzr = LDRXui %9:gpr64common, 0
+# CHECK-DBG: 288B RET_ReallyLR
+# CHECK-DBG: 304B bb.2:
+# CHECK-DBG: ; predecessors: %bb.0
+# CHECK-DBG: 320B $x0 = COPY %7:gpr64
+# CHECK-DBG: 336B RET_ReallyLR implicit $x0
+
+---
+name: reproducer3
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ %0:gpr32 = COPY killed $w1
+ %1:gpr32 = COPY killed $w0
+ %3:gpr32 = UBFMWri %1, 31, 30
+ %4:gpr32 = SUBWrs killed %3, killed %0, 1
+ %5:gpr32 = UBFMWri killed %4, 1, 31
+ %6:gpr32 = MOVi32imm 1
+ %7:gpr32 = COPY %6
+ %7:gpr32 = BFMWri %7, killed %1, 31, 30
+ %8:gpr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32
+ %9:gpr32common = COPY killed %6
+ %9:gpr32common = BFMWri %9, killed %5, 30, 29
+ dead $wzr = SUBSWri killed %9, 0, 0, implicit-def $nzcv
+ Bcc 2, %bb.2, implicit killed $nzcv
+ B %bb.1
+
+ bb.1:
+ %10:gpr64common = UBFMXri killed %8, 62, 61
+ dead $xzr = LDRXui killed %10, 0
+ RET_ReallyLR
+
+ bb.2:
+ $x0 = COPY killed %8
+ RET_ReallyLR implicit killed $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/selectopt-const.ll b/llvm/test/CodeGen/AArch64/selectopt-const.ll
index a44c746..fe48dba 100644
--- a/llvm/test/CodeGen/AArch64/selectopt-const.ll
+++ b/llvm/test/CodeGen/AArch64/selectopt-const.ll
@@ -29,8 +29,8 @@ define i32 @test_const(ptr %in1, ptr %in2, ptr %out, i32 %n, ptr %tbl) {
; CHECK-NEXT: csel x10, x9, xzr, lt
; CHECK-NEXT: subs x8, x8, #1
; CHECK-NEXT: ldr s3, [x4, x10]
-; CHECK-NEXT: fcvtzs w10, s3
-; CHECK-NEXT: str w10, [x2], #4
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: st1 { v3.s }[0], [x2], #4
; CHECK-NEXT: b.ne .LBB0_2
; CHECK-NEXT: .LBB0_3: // %for.cond.cleanup
; CHECK-NEXT: mov w0, wzr
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
index c63899c..19ac03d 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s --check-prefixes=STRIDED
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS
define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_i8_z0_z8:
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
index 05241f7..039b621 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s --check-prefixes=STRIDED
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS
define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; STRIDED-LABEL: ldnt1_x2_i8_z0_z8:
diff --git a/llvm/test/CodeGen/AArch64/store-float-conversion.ll b/llvm/test/CodeGen/AArch64/store-float-conversion.ll
new file mode 100644
index 0000000..c46801f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/store-float-conversion.ll
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=aarch64 < %s | FileCheck %s
+
+define void @f32_to_u8(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str b0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_s8(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str b0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_u16(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ %trunc = trunc i32 %conv to i16
+ store i16 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_s16(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i16
+ store i16 %trunc, ptr %dst
+ ret void
+}
+
+define void @f32_to_u32(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_u32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: str s0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui float %f to i32
+ store i32 %conv, ptr %dst
+ ret void
+}
+
+define void @f32_to_s32(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: str s0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ store i32 %conv, ptr %dst
+ ret void
+}
+
+define void @f32_to_s64(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs w8, s0
+; CHECK-NEXT: sxtw x8, w8
+; CHECK-NEXT: str x8, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %ext = sext i32 %conv to i64
+ store i64 %ext, ptr %dst
+ ret void
+}
+
+define void @f64_to_u64(double %d, ptr %dst) {
+; CHECK-LABEL: f64_to_u64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu d0, d0
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptoui double %d to i64
+ store i64 %conv, ptr %dst
+ ret void
+}
+
+define void @f64_to_s64(double %d, ptr %dst) {
+; CHECK-LABEL: f64_to_s64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs d0, d0
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi double %d to i64
+ store i64 %conv, ptr %dst
+ ret void
+}
+
+define i32 @f32_to_i32_multiple_uses(float %f, ptr %dst) {
+; CHECK-LABEL: f32_to_i32_multiple_uses:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs w8, s0
+; CHECK-NEXT: mov x9, x0
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: strb w8, [x9]
+; CHECK-NEXT: ret
+entry:
+ %conv = fptosi float %f to i32
+ %trunc = trunc i32 %conv to i8
+ store i8 %trunc, ptr %dst
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index aa0a163..5fc996a 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -63,7 +63,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s2, #0.0
; CHECK-NEXT: fcsel s2, s0, s3, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
+; CHECK-NEXT: fmov w11, s2
; CHECK-NEXT: strb w11, [x9], #1
; CHECK-NEXT: b.ne .LBB0_7
; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup
@@ -178,12 +179,12 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s3, s1
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: fcsel s3, s0, s4, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w11, [x9]
-; CHECK-NEXT: fcvtzs w12, s3
-; CHECK-NEXT: strb w12, [x9, #1]
+; CHECK-NEXT: str b2, [x9]
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: stur b3, [x9, #1]
; CHECK-NEXT: add x9, x9, #2
; CHECK-NEXT: b.ne .LBB1_6
; CHECK-NEXT: .LBB1_7: // %for.cond.cleanup
@@ -395,19 +396,19 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
; CHECK-NEXT: ldr s3, [x8, #8]
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: add x8, x8, #12
; CHECK-NEXT: fcsel s4, s0, s4, mi
; CHECK-NEXT: fcmp s3, s1
-; CHECK-NEXT: strb w11, [x9]
+; CHECK-NEXT: str b2, [x9]
; CHECK-NEXT: fcsel s5, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w12, s4
+; CHECK-NEXT: fcvtzs s4, s4
; CHECK-NEXT: fcsel s3, s0, s5, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w12, [x9, #1]
-; CHECK-NEXT: fcvtzs w13, s3
-; CHECK-NEXT: strb w13, [x9, #2]
+; CHECK-NEXT: stur b4, [x9, #1]
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: stur b3, [x9, #2]
; CHECK-NEXT: add x9, x9, #3
; CHECK-NEXT: b.ne .LBB2_8
; CHECK-NEXT: .LBB2_9: // %for.cond.cleanup
@@ -563,26 +564,26 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcmp s3, s1
; CHECK-NEXT: fcsel s4, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
-; CHECK-NEXT: fcvtzs w11, s2
+; CHECK-NEXT: fcvtzs s2, s2
; CHECK-NEXT: ldp s3, s5, [x8, #8]
; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: fcsel s4, s0, s4, mi
; CHECK-NEXT: fcmp s3, s1
-; CHECK-NEXT: strb w11, [x9]
-; CHECK-NEXT: fcvtzs w12, s4
+; CHECK-NEXT: str b2, [x9]
+; CHECK-NEXT: fcvtzs s4, s4
; CHECK-NEXT: fcsel s6, s1, s3, gt
; CHECK-NEXT: fcmp s3, #0.0
; CHECK-NEXT: fcsel s3, s0, s6, mi
; CHECK-NEXT: fcmp s5, s1
-; CHECK-NEXT: strb w12, [x9, #1]
+; CHECK-NEXT: stur b4, [x9, #1]
; CHECK-NEXT: fcsel s6, s1, s5, gt
; CHECK-NEXT: fcmp s5, #0.0
-; CHECK-NEXT: fcvtzs w13, s3
-; CHECK-NEXT: fcsel s2, s0, s6, mi
+; CHECK-NEXT: fcvtzs s3, s3
+; CHECK-NEXT: fcsel s5, s0, s6, mi
; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: strb w13, [x9, #2]
-; CHECK-NEXT: fcvtzs w14, s2
-; CHECK-NEXT: strb w14, [x9, #3]
+; CHECK-NEXT: stur b3, [x9, #2]
+; CHECK-NEXT: fcvtzs s5, s5
+; CHECK-NEXT: stur b5, [x9, #3]
; CHECK-NEXT: add x9, x9, #4
; CHECK-NEXT: b.ne .LBB3_6
; CHECK-NEXT: .LBB3_7: // %for.cond.cleanup
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 290a473..74d1165 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1907,11 +1907,8 @@ define i32 @test_udot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -1952,17 +1949,13 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: umull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v16i8:
@@ -2018,36 +2011,21 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: umull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: uaddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
@@ -2118,61 +2096,33 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: ushll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: ushll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: umull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: umull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: umull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: umull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: umlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: umlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: umlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: umlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: umull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: umull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: umull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: uaddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: uaddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: uaddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
@@ -2225,11 +2175,8 @@ define i32 @test_sdot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: smull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -2270,17 +2217,13 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: smull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: saddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v16i8:
@@ -2336,36 +2279,21 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: smull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: saddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
@@ -2436,61 +2364,33 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: sshll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: smull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: smull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: smull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: smull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: smlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: smlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: smlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: smlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: smull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: smull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: smull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: saddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: saddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: saddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
@@ -2549,18 +2449,27 @@ define i32 @test_udot_v8i8_multi_use(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-DOT-NEXT: add w0, w8, w9
; CHECK-SD-DOT-NEXT: ret
;
-; CHECK-GI-LABEL: test_udot_v8i8_multi_use:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-NEXT: mov v3.16b, v2.16b
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: umlal2 v3.4s, v1.8h, v0.8h
-; CHECK-GI-NEXT: addv s0, v3.4s
-; CHECK-GI-NEXT: fmov w9, s0
-; CHECK-GI-NEXT: add w0, w9, w8
-; CHECK-GI-NEXT: ret
+; CHECK-GI-BASE-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v0.8h
+; CHECK-GI-BASE-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: umull v3.8h, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: udot v2.2s, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: ushll v0.4s, v3.4h, #0
+; CHECK-GI-DOT-NEXT: fmov w9, s0
+; CHECK-GI-DOT-NEXT: addp v1.2s, v2.2s, v2.2s
+; CHECK-GI-DOT-NEXT: fmov w8, s1
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
+; CHECK-GI-DOT-NEXT: ret
entry:
%0 = zext <8 x i8> %a to <8 x i32>
%1 = zext <8 x i8> %b to <8 x i32>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/bug-legalization-artifact-combiner-dead-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/bug-legalization-artifact-combiner-dead-def.mir
index daf7b3a..8d13522 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/bug-legalization-artifact-combiner-dead-def.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/bug-legalization-artifact-combiner-dead-def.mir
@@ -38,7 +38,7 @@ body: |
; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
; GFX10-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p5) :: (store (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD]](p5) :: (store (s32) into unknown-address + 4, addrspace 5)
%0:_(p5) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
index 6d2f253..679d4a2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
@@ -189,29 +189,22 @@ define amdgpu_kernel void @kernel_caller_byval() {
; FLATSCR-NEXT: s_getpc_b64 s[0:1]
; FLATSCR-NEXT: s_add_u32 s0, s0, external_void_func_byval@rel32@lo+4
; FLATSCR-NEXT: s_addc_u32 s1, s1, external_void_func_byval@rel32@hi+12
-; FLATSCR-NEXT: s_add_u32 s2, s32, 8
-; FLATSCR-NEXT: s_add_u32 s3, s32, 16
-; FLATSCR-NEXT: s_add_u32 s4, s32, 24
-; FLATSCR-NEXT: s_add_u32 s5, s32, 32
-; FLATSCR-NEXT: s_add_u32 s6, s32, 40
-; FLATSCR-NEXT: s_add_u32 s7, s32, 48
-; FLATSCR-NEXT: s_add_u32 s8, s32, 56
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[0:1], s32
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[2:3], s2
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[2:3], s32 offset:8
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[4:5], s3
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[4:5], s32 offset:16
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[6:7], s4
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[6:7], s32 offset:24
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[8:9], s5
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[8:9], s32 offset:32
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[10:11], s6
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[10:11], s32 offset:40
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[12:13], s7
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[12:13], s32 offset:48
; FLATSCR-NEXT: s_waitcnt vmcnt(7)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[14:15], s8
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[14:15], s32 offset:56
; FLATSCR-NEXT: s_swappc_b64 s[30:31], s[0:1]
; FLATSCR-NEXT: s_endpgm
%alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -391,49 +384,35 @@ define void @func_caller_byval(ptr addrspace(5) %argptr) {
; FLATSCR-NEXT: s_mov_b64 exec, s[2:3]
; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off
; FLATSCR-NEXT: s_add_i32 s32, s32, 16
-; FLATSCR-NEXT: v_add_u32_e32 v3, 8, v0
; FLATSCR-NEXT: v_writelane_b32 v40, s0, 2
-; FLATSCR-NEXT: s_add_u32 s0, s32, 8
-; FLATSCR-NEXT: s_add_u32 s2, s32, 56
; FLATSCR-NEXT: v_writelane_b32 v40, s30, 0
+; FLATSCR-NEXT: s_getpc_b64 s[0:1]
+; FLATSCR-NEXT: s_add_u32 s0, s0, external_void_func_byval@rel32@lo+4
+; FLATSCR-NEXT: s_addc_u32 s1, s1, external_void_func_byval@rel32@hi+12
; FLATSCR-NEXT: v_writelane_b32 v40, s31, 1
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: v_add_u32_e32 v3, 16, v0
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:8
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: s_add_u32 s0, s32, 16
-; FLATSCR-NEXT: v_add_u32_e32 v3, 24, v0
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:8
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:16
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: s_add_u32 s0, s32, 24
-; FLATSCR-NEXT: v_add_u32_e32 v3, 32, v0
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:16
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:24
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: s_add_u32 s0, s32, 32
-; FLATSCR-NEXT: v_add_u32_e32 v3, 40, v0
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:24
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:32
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: s_add_u32 s0, s32, 40
-; FLATSCR-NEXT: v_add_u32_e32 v3, 48, v0
-; FLATSCR-NEXT: v_add_u32_e32 v0, 56, v0
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:32
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:40
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v3, off
-; FLATSCR-NEXT: s_add_u32 s0, s32, 48
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:40
+; FLATSCR-NEXT: scratch_load_dwordx2 v[1:2], v0, off offset:48
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
-; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], v0, off
-; FLATSCR-NEXT: s_getpc_b64 s[0:1]
-; FLATSCR-NEXT: s_add_u32 s0, s0, external_void_func_byval@rel32@lo+4
-; FLATSCR-NEXT: s_addc_u32 s1, s1, external_void_func_byval@rel32@hi+12
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s32 offset:48
+; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], v0, off offset:56
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: scratch_store_dwordx2 off, v[0:1], s2
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[0:1], s32 offset:56
; FLATSCR-NEXT: s_swappc_b64 s[30:31], s[0:1]
; FLATSCR-NEXT: v_readlane_b32 s31, v40, 1
; FLATSCR-NEXT: v_readlane_b32 s30, v40, 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index b2a4c82..a066b15 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -2548,54 +2548,40 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX9-LABEL: store_load_i64_unaligned:
; UNALIGNED_GFX9: ; %bb.0: ; %bb
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v4, 15
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v1, 4, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v3, 1, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v4, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 15
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v4, 0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v6, 6, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v3, v4, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:1
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v2, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:2
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v5, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:3
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v7, 5, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v1, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v7, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:5
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v8, 7, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v6, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:6
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v8, v4, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:7
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v0, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr2
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr6
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr3
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr8
; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v3, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v2, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v5, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v1, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v7, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v6, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v4, v8, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:7 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2604,98 +2590,77 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v2, 0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v3, 4, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v5, 2, v0
; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v6, 5, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v7, 6, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v8, 7, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v4, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:1
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v5, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:2
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v1, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:3
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v3, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v6, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:5
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v7, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:6
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v8, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:7
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v4, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v5, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v1, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v3, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v6, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v7, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v8, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off offset:7 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX10-NEXT: s_setpc_b64 s[30:31]
;
; UNALIGNED_GFX942-LABEL: store_load_i64_unaligned:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v4, 15
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v1, 4, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v3, 1, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 15
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v4, 0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v6, 6, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v3, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v2, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v5, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v7, 5, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v1, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v7, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v8, 7, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v6, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v8, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v0, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr2
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr6
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr3
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr8
; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v2, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v5, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v1, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v7, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v6, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v4, v8, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: s_setpc_b64 s[30:31]
;
@@ -2703,44 +2668,37 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_mov_b32 v2, 0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v3, 4, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v5, 2, v0
; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v1, off dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v6, 5, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v7, 6, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v8, 7, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v4, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:1 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v5, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:2 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v1, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:3 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v3, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v6, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:5 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v7, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:6 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v8, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:7 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v4, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v5, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v1, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v3, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v6, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v7, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v8, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off offset:7 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX11-NEXT: s_setpc_b64 s[30:31]
;
@@ -2875,80 +2833,58 @@ define void @store_load_v3i32_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX9-LABEL: store_load_v3i32_unaligned:
; UNALIGNED_GFX9: ; %bb.0: ; %bb
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v3, 1
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 2
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v4, 1, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v3, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 1
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v6, 4, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v7, 6, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v9, 8, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v10, 10, v0
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v12, 3
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v4, v3, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v2, 2
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:1
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v2, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:2
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v5, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:3
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v8, 5, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v6, v1, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v2, off offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v8, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:5
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v1, 7, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v7, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:6
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v1, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:7
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v11, 9, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v9, v12, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v2, 3
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v2, off offset:8
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v11, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:9
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v12, 11, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v10, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:10
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v12, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:11
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v0, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr12
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr4
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr11
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr6
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr10
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr9
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr8
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr2
; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v4, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v2, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v5, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v6, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v8, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v7, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v1, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:7 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v9, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:8 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v11, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:9 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v10, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:10 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v12, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:11 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2956,212 +2892,170 @@ define void @store_load_v3i32_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX10: ; %bb.0: ; %bb
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 1
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v0
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v2, 2
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v4, 2, v0
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v2, 0
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v3, 2
; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v6, 4, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v7, 5, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v5, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:1
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v4, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:2
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v8, 6, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v1, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:3
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v6, v2, off
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 3
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v3, off offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v7, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:5
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v2, 7, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v9, 8, v0
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v10, 3
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v11, 9, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v12, 10, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v13, 11, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v8, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:6
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v2, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:7
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v9, v10, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v1, off offset:8
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v11, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:9
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v12, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:10
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v13, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:11
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v5, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v4, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v1, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v6, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v7, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v8, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v2, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:7 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v9, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:8 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v11, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:9 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v12, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:10 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v13, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off offset:11 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX10-NEXT: s_setpc_b64 s[30:31]
;
; UNALIGNED_GFX942-LABEL: store_load_v3i32_unaligned:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v3, 1
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 2
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v4, 1, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v6, 4, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v7, 6, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v9, 8, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v10, 10, v0
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v12, 3
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v4, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v2, 2
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v2, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v5, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v8, 5, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v6, v1, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v2, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v8, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v1, 7, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v7, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v1, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v11, 9, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v9, v12, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v2, 3
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v2, off offset:8 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v11, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:9 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v12, 11, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v10, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:10 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v12, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:11 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v0, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr12
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr4
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr11
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr6
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr10
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr9
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr8
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr2
; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v2, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v5, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v6, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v8, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v7, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v1, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v9, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:8 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v11, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:9 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v10, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:10 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v12, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:11 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: s_setpc_b64 s[30:31]
;
; UNALIGNED_GFX11-LABEL: store_load_v3i32_unaligned:
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v4, 2, v0
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v10, 3 :: v_dual_add_nc_u32 v5, 1, v0
+; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 0
+; UNALIGNED_GFX11-NEXT: v_mov_b32_e32 v3, 2
; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v1, off dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v6, 4, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v7, 5, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v5, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:1 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v4, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:2 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v8, 6, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v1, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:3 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v6, v2, off dlc
+; UNALIGNED_GFX11-NEXT: v_mov_b32_e32 v1, 3
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v3, off offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v7, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:5 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v2, 7, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v9, 8, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v11, 9, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v12, 10, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v13, 11, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v8, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:6 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v2, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:7 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v9, v10, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v1, off offset:8 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v11, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:9 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v12, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:10 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v13, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:11 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v5, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v4, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v1, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v6, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v7, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v8, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v2, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:7 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v9, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:8 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v11, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:9 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v12, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:10 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v13, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off offset:11 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX11-NEXT: s_setpc_b64 s[30:31]
;
@@ -3320,104 +3214,74 @@ define void @store_load_v4i32_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX9-LABEL: store_load_v4i32_unaligned:
; UNALIGNED_GFX9: ; %bb.0: ; %bb
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v3, 1
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 2
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v4, 1, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v3, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 1
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v6, 4
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v7, 4, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v8, 6, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v10, 8, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v11, 10, v0
-; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v13, 3
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v14, 12, v0
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v15, 14, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v4, v3, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v2, 2
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:1
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v2, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:2
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v5, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:3
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v9, 5, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v7, v1, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v2, off offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v9, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:5
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v1, 7, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v8, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:6
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v1, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:7
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v12, 9, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v10, v13, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v2, 3
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v2, off offset:8
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v12, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:9
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v13, 11, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v11, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:10
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v13, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:11
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v16, 13, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v14, v6, off
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v2, 4
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v2, off offset:12
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v16, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:13
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v6, 15, v0
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v15, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:14
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_store_byte v6, v3, off
+; UNALIGNED_GFX9-NEXT: scratch_store_byte v0, v1, off offset:15
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v0, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v4, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v2, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v5, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v7, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v9, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v8, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v1, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:7 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v10, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:8 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v12, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:9 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v11, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:10 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v13, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:11 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v14, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:12 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v16, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:13 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v3, v15, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v1, v0, off offset:14 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr2
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr9
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr16
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr11
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr4
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr15
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr10
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr13
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr14
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr12
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr8
-; UNALIGNED_GFX9-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v0, v6, off glc
+; UNALIGNED_GFX9-NEXT: scratch_load_ubyte v0, v0, off offset:15 glc
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -3425,277 +3289,220 @@ define void @store_load_v4i32_unaligned(ptr addrspace(5) nocapture %arg) {
; UNALIGNED_GFX10: ; %bb.0: ; %bb
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 1
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v2, 2
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v6, 4, v0
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v2, 0
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v3, 2
; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v1, off
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v5, 2, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v7, 5, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v4, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:1
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v5, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:2
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v1, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:3
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v9, 6, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v6, v2, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v3, off offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v7, v3, off
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 3
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v3, 4
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:5
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v2, 7, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v10, 8, v0
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v11, 3
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v12, 9, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v9, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:6
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v13, 10, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v2, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:7
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v10, v11, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v1, off offset:8
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v12, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:9
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v11, 11, v0
-; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v8, 4
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v14, 12, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v15, 13, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v16, 14, v0
-; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v17, 15, v0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v13, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:10
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v11, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:11
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v14, v8, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v3, off offset:12
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v15, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:13
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v16, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:14
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_store_byte v17, v3, off
+; UNALIGNED_GFX10-NEXT: scratch_store_byte v0, v2, off offset:15
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v4, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v5, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v1, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v6, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v7, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v9, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v2, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:7 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v10, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:8 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v12, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:9 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v13, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:10 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v11, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:11 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v14, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:12 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v15, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:13 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v16, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v1, v0, off offset:14 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v17, off glc dlc
+; UNALIGNED_GFX10-NEXT: scratch_load_ubyte v0, v0, off offset:15 glc dlc
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX10-NEXT: s_setpc_b64 s[30:31]
;
; UNALIGNED_GFX942-LABEL: store_load_v4i32_unaligned:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v3, 1
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 2
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v2, 2, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v4, 1, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v3, 0
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v6, 4
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v7, 4, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v8, 6, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v10, 8, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v11, 10, v0
-; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v13, 3
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v14, 12, v0
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v15, 14, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v4, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v1, 0
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v2, 2
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v5, 3, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v2, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v5, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v9, 5, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v7, v1, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v2, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v9, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v1, 7, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v8, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v1, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v12, 9, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v10, v13, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v2, 3
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v2, off offset:8 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v12, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:9 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v13, 11, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v11, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:10 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v13, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:11 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v16, 13, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v14, v6, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v2, 4
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v2, off offset:12 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v16, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:13 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: v_add_u32_e32 v6, 15, v0
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v15, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:14 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_store_byte v6, v3, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_store_byte v0, v1, off offset:15 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v0, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v4, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:1 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v2, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:2 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v5, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:3 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v7, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v9, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:5 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v8, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:6 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v1, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:7 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v10, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:8 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v12, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:9 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v11, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:10 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v13, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:11 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v14, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:12 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v16, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:13 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v3, v15, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v1, v0, off offset:14 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr2
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr1
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr9
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr16
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr11
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr4
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr15
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr10
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr7
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr13
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr5
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr14
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr12
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr8
-; UNALIGNED_GFX942-NEXT: ; kill: killed $vgpr0
-; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v0, v6, off sc0 sc1
+; UNALIGNED_GFX942-NEXT: scratch_load_ubyte v0, v0, off offset:15 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: s_setpc_b64 s[30:31]
;
; UNALIGNED_GFX11-LABEL: store_load_v4i32_unaligned:
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v4, 1, v0
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v11, 3 :: v_dual_add_nc_u32 v6, 4, v0
+; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 0
+; UNALIGNED_GFX11-NEXT: v_mov_b32_e32 v3, 2
; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v1, off dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v0
-; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v8, 4 :: v_dual_add_nc_u32 v5, 2, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v7, 5, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v4, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:1 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v5, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:2 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v1, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:3 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v9, 6, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v6, v2, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v3, off offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v7, v3, off dlc
+; UNALIGNED_GFX11-NEXT: v_mov_b32_e32 v1, 3
+; UNALIGNED_GFX11-NEXT: v_mov_b32_e32 v3, 4
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:5 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v2, 7, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v10, 8, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v12, 9, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v9, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:6 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v13, 10, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v2, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:7 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v10, v11, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v1, off offset:8 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v12, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:9 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v11, 11, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v14, 12, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v15, 13, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v16, 14, v0
-; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v17, 15, v0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v13, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:10 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v11, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:11 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v14, v8, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v3, off offset:12 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v15, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:13 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v16, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:14 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_store_b8 v17, v3, off dlc
+; UNALIGNED_GFX11-NEXT: scratch_store_b8 v0, v2, off offset:15 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v4, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:1 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v5, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:2 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v1, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:3 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v6, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:4 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v7, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:5 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v9, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:6 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v2, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:7 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v10, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:8 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v12, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:9 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v13, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:10 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v11, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:11 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v14, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:12 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v15, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:13 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v16, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v1, v0, off offset:14 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
-; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v17, off glc dlc
+; UNALIGNED_GFX11-NEXT: scratch_load_u8 v0, v0, off offset:15 glc dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX11-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 0e132f1..2785b78 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s -check-prefix=GFX90A
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck %s -check-prefix=GFX942
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck %s -check-prefix=GFX1250
declare double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32, i32 immarg)
declare double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double, ptr addrspace(8), i32, i32, i32, i32 immarg)
@@ -37,6 +38,17 @@ define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -56,6 +68,13 @@ define amdgpu_ps void @raw_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -92,6 +111,24 @@ define amdgpu_kernel void @raw_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -120,6 +157,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -139,6 +187,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_add_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -175,6 +230,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -203,6 +276,17 @@ define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -222,6 +306,13 @@ define amdgpu_ps void @struct_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -258,6 +349,23 @@ define amdgpu_kernel void @struct_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -286,6 +394,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -305,6 +424,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_add_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -341,6 +467,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -369,6 +512,17 @@ define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -388,6 +542,13 @@ define amdgpu_ps void @raw_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -424,6 +585,24 @@ define amdgpu_kernel void @raw_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -452,6 +631,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -471,6 +661,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_min_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -507,6 +704,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -535,6 +750,17 @@ define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -554,6 +780,13 @@ define amdgpu_ps void @struct_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -590,6 +823,23 @@ define amdgpu_kernel void @struct_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -618,6 +868,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -637,6 +898,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_min_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -673,6 +941,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -701,6 +986,17 @@ define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -720,6 +1016,13 @@ define amdgpu_ps void @raw_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -756,6 +1059,24 @@ define amdgpu_kernel void @raw_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -784,6 +1105,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -803,6 +1135,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_max_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -839,6 +1178,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -867,6 +1224,17 @@ define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -886,6 +1254,13 @@ define amdgpu_ps void @struct_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -922,6 +1297,23 @@ define amdgpu_kernel void @struct_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -950,6 +1342,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -969,6 +1372,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_max_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -1005,6 +1415,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -1056,6 +1483,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: .LBB36_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB36_2
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: .LBB36_2:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1104,6 +1555,28 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: .LBB37_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB37_2
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: .LBB37_2:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1154,6 +1627,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: .LBB38_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB38_2
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: .LBB38_2:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1202,6 +1699,28 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: .LBB39_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_flush:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB39_2
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: .LBB39_2:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1229,6 +1748,19 @@ define double @global_atomic_fadd_f64_rtn_pat(ptr addrspace(1) %ptr, double %dat
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1254,6 +1786,18 @@ define double @global_atomic_fadd_f64_rtn_pat_agent(ptr addrspace(1) %ptr, doubl
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1281,6 +1825,19 @@ define double @global_atomic_fadd_f64_rtn_pat_system(ptr addrspace(1) %ptr, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1329,6 +1886,28 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrs
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: .LBB43_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: .LBB43_2:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1360,6 +1939,19 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1389,6 +1981,17 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1420,6 +2023,19 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_system(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1447,6 +2063,19 @@ define double @flat_atomic_fadd_f64_rtn_pat(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1472,6 +2101,18 @@ define double @flat_atomic_fadd_f64_rtn_pat_agent(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1501,6 +2142,19 @@ define double @flat_atomic_fadd_f64_rtn_pat_system(ptr %ptr) #1 {
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1530,6 +2184,17 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent_safe(ptr %ptr) {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_agent_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1575,6 +2240,40 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: .LBB51_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB51_3
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v4, s1
+; GFX1250-NEXT: ds_load_b64 v[2:3], v4
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: .LBB51_2: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB51_2
+; GFX1250-NEXT: .LBB51_3:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1620,6 +2319,40 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: .LBB52_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB52_3
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v4, s1
+; GFX1250-NEXT: ds_load_b64 v[2:3], v4
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: .LBB52_2: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB52_2
+; GFX1250-NEXT: .LBB52_3:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1665,6 +2398,40 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: .LBB53_2:
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s1, exec_lo
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1250-NEXT: s_cbranch_execz .LBB53_3
+; GFX1250-NEXT: ; %bb.1:
+; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v4, s1
+; GFX1250-NEXT: ds_load_b64 v[2:3], v4
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
+; GFX1250-NEXT: .LBB53_2: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB53_2
+; GFX1250-NEXT: .LBB53_3:
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1687,6 +2454,29 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX942-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, v0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
ret double %ret
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
index eafad58..2226fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
@@ -157,7 +157,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
- %r2 = frem half %r0, %r1
+ %r2 = frem afn half %r0, %r1
store half %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -311,7 +311,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
- %r2 = frem float %r0, %r1
+ %r2 = frem afn float %r0, %r1
store float %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -489,7 +489,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
ptr addrspace(1) %in2) #1 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
- %r2 = frem double %r0, %r1
+ %r2 = frem afn double %r0, %r1
store double %r2, ptr addrspace(1) %out, align 8
ret void
}
@@ -1140,5 +1140,5 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
index 8c01bc7..7dce9ac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
@@ -932,7 +932,7 @@ define {i8, i32} @struct_i8_i32_func_void() #0 {
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
@@ -952,9 +952,9 @@ define void @void_func_sret_struct_i8_i32(ptr addrspace(5) sret({ i8, i32 }) %ar
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load (s8) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (volatile load (s32) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: %13:_(p5) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.arg0, addrspace 5)
- ; CHECK-NEXT: G_STORE [[LOAD1]](s32), %13(p5) :: (store (s32) into %ir.gep1, addrspace 5)
+ ; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
; CHECK-NEXT: SI_RETURN
%val0 = load volatile i8, ptr addrspace(1) poison
%val1 = load volatile i32, ptr addrspace(1) poison
@@ -1018,11 +1018,11 @@ define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr + 128, align 128, addrspace 1)
; CHECK-NEXT: G_STORE [[LOAD1]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK-NEXT: G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store (s32), align 128, addrspace 5)
; CHECK-NEXT: SI_RETURN
%ptr = load volatile ptr addrspace(1), ptr addrspace(4) poison
@@ -1040,11 +1040,11 @@ define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load (s32) from %ir.ptr, align 128, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<32 x s32>) from %ir.ptr + 128, addrspace 1)
; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[COPY]](p5) :: (store (s32), align 128, addrspace 5)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK-NEXT: G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store (<32 x s32>), addrspace 5)
; CHECK-NEXT: SI_RETURN
%ptr = load volatile ptr addrspace(1), ptr addrspace(4) poison
@@ -1296,23 +1296,23 @@ define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<32 x s32>) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p1) :: (volatile load (p3) from `ptr addrspace(1) poison` + 128, align 128, addrspace 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(p1) = G_LOAD [[PTR_ADD1]](p1) :: (volatile load (p1) from `ptr addrspace(1) poison` + 136, addrspace 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[PTR_ADD2]](p1) :: (volatile load (<2 x p1>) from `ptr addrspace(1) poison` + 144, addrspace 1)
; CHECK-NEXT: G_STORE [[LOAD]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CHECK-NEXT: G_STORE [[LOAD1]](p3), [[PTR_ADD3]](p5) :: (store (p3), align 128, addrspace 5)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CHECK-NEXT: G_STORE [[LOAD2]](p1), [[PTR_ADD4]](p5) :: (store (p1), addrspace 5)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CHECK-NEXT: G_STORE [[LOAD3]](<2 x p1>), [[PTR_ADD5]](p5) :: (store (<2 x p1>), addrspace 5)
; CHECK-NEXT: SI_RETURN
%val = load volatile %struct.with.ptrs, ptr addrspace(1) poison
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
index 122b8fb..11153bb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
@@ -2078,7 +2078,7 @@ define amdgpu_kernel void @v2p1i8_in_struct_arg({ <2 x ptr addrspace(1)>, <2 x p
; HSA-VI-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; HSA-VI-NEXT: G_STORE [[LOAD]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, addrspace 1)
; HSA-VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; HSA-VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; HSA-VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; HSA-VI-NEXT: G_STORE [[LOAD1]](<2 x p3>), [[PTR_ADD2]](p1) :: (store (<2 x p3>) into `ptr addrspace(1) poison` + 16, align 16, addrspace 1)
; HSA-VI-NEXT: S_ENDPGM 0
;
@@ -2096,7 +2096,7 @@ define amdgpu_kernel void @v2p1i8_in_struct_arg({ <2 x ptr addrspace(1)>, <2 x p
; LEGACY-MESA-VI-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; LEGACY-MESA-VI-NEXT: G_STORE [[LOAD]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, addrspace 1)
; LEGACY-MESA-VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; LEGACY-MESA-VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; LEGACY-MESA-VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; LEGACY-MESA-VI-NEXT: G_STORE [[LOAD1]](<2 x p3>), [[PTR_ADD2]](p1) :: (store (<2 x p3>) into `ptr addrspace(1) poison` + 16, align 16, addrspace 1)
; LEGACY-MESA-VI-NEXT: S_ENDPGM 0
store { <2 x ptr addrspace(1)>, <2 x ptr addrspace(3)> } %arg, ptr addrspace(1) poison
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
index 3e7a567..bbbce9a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
@@ -24,7 +24,7 @@ define amdgpu_kernel void @kernel_call_no_workitem_ids() {
; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY9]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY9]], [[C]](s64)
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s64) = COPY [[COPY3]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY2]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY1]]
@@ -65,7 +65,7 @@ define amdgpu_kernel void @kernel_call_no_workgroup_ids() {
; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY4]]
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY9]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY9]], [[C]](s64)
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s64) = COPY [[COPY3]]
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
@@ -105,7 +105,7 @@ define amdgpu_kernel void @kernel_call_no_other_sgprs() {
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @extern
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(p4) = COPY [[COPY3]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY4]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY4]], [[C]](s64)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
index 33862de..d695155 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
@@ -31,7 +31,7 @@ define amdgpu_kernel void @test_call_external_void_func_i32([17 x i8]) #0 {
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GFX900-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -84,7 +84,7 @@ define amdgpu_kernel void @test_call_external_void_func_i32([17 x i8]) #0 {
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GFX908-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -230,7 +230,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32([17 x i8]) #0 {
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GFX900-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -319,7 +319,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32([17 x i8]) #0 {
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GFX908-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -668,7 +668,7 @@ define amdgpu_kernel void @test_only_workitem_id_x() #0 !reqd_work_group_size !0
; GFX900-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -710,7 +710,7 @@ define amdgpu_kernel void @test_only_workitem_id_x() #0 !reqd_work_group_size !0
; GFX908-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -756,7 +756,7 @@ define amdgpu_kernel void @test_only_workitem_id_y() #0 !reqd_work_group_size !1
; GFX900-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -802,7 +802,7 @@ define amdgpu_kernel void @test_only_workitem_id_y() #0 !reqd_work_group_size !1
; GFX908-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -852,7 +852,7 @@ define amdgpu_kernel void @test_only_workitem_id_z() #0 !reqd_work_group_size !2
; GFX900-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -898,7 +898,7 @@ define amdgpu_kernel void @test_only_workitem_id_z() #0 !reqd_work_group_size !2
; GFX908-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY10]], [[C1]](s64)
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
@@ -949,7 +949,7 @@ define amdgpu_kernel void @test_only_workitem_id_xy() #0 !reqd_work_group_size !
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
@@ -996,7 +996,7 @@ define amdgpu_kernel void @test_only_workitem_id_xy() #0 !reqd_work_group_size !
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
@@ -1047,7 +1047,7 @@ define amdgpu_kernel void @test_only_workitem_id_yz() #0 !reqd_work_group_size !
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
@@ -1098,7 +1098,7 @@ define amdgpu_kernel void @test_only_workitem_id_yz() #0 !reqd_work_group_size !
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
@@ -1153,7 +1153,7 @@ define amdgpu_kernel void @test_only_workitem_id_xz() #0 !reqd_work_group_size !
; GFX900-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX900-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX900-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX900-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX900-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX900-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX900-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
@@ -1200,7 +1200,7 @@ define amdgpu_kernel void @test_only_workitem_id_xz() #0 !reqd_work_group_size !
; GFX908-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
; GFX908-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
; GFX908-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY11]], [[C1]](s64)
+ ; GFX908-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY11]], [[C1]](s64)
; GFX908-NEXT: [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY5]]
; GFX908-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
; GFX908-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
index c06af21..6bfd0f060 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
@@ -68,7 +68,7 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32
@@ -94,7 +94,7 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32_inreg
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
index 736bc8b..6573088 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
@@ -91,7 +91,7 @@ define amdgpu_kernel void @test_call_external_i32_func_i32_imm(ptr addrspace(1)
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -174,7 +174,7 @@ define amdgpu_kernel void @test_call_external_i1_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -252,7 +252,7 @@ define amdgpu_kernel void @test_call_external_i1_zeroext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -314,7 +314,7 @@ define amdgpu_kernel void @test_call_external_i1_signext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -376,7 +376,7 @@ define amdgpu_kernel void @test_call_external_i8_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -456,7 +456,7 @@ define amdgpu_kernel void @test_call_external_i8_zeroext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -518,7 +518,7 @@ define amdgpu_kernel void @test_call_external_i8_signext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -580,7 +580,7 @@ define amdgpu_kernel void @test_call_external_i16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -639,7 +639,7 @@ define amdgpu_kernel void @test_call_external_i16_zeroext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -701,7 +701,7 @@ define amdgpu_kernel void @test_call_external_i16_signext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -763,7 +763,7 @@ define amdgpu_kernel void @test_call_external_i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -839,7 +839,7 @@ define amdgpu_kernel void @test_call_external_i48_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -900,7 +900,7 @@ define amdgpu_kernel void @test_call_external_i48_zeroext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -963,7 +963,7 @@ define amdgpu_kernel void @test_call_external_i48_signext_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1026,7 +1026,7 @@ define amdgpu_kernel void @test_call_external_i64_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1086,7 +1086,7 @@ define amdgpu_kernel void @test_call_external_p1_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1146,7 +1146,7 @@ define amdgpu_kernel void @test_call_external_v2p1_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1210,7 +1210,7 @@ define amdgpu_kernel void @test_call_external_p3_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1268,7 +1268,7 @@ define amdgpu_kernel void @test_call_external_v2p3_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1328,7 +1328,7 @@ define amdgpu_kernel void @test_call_external_f16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1387,7 +1387,7 @@ define amdgpu_kernel void @test_call_external_f32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1445,7 +1445,7 @@ define amdgpu_kernel void @test_call_external_f64_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1505,7 +1505,7 @@ define amdgpu_kernel void @test_call_external_v2f64_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1569,7 +1569,7 @@ define amdgpu_kernel void @test_call_external_v2i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1629,7 +1629,7 @@ define amdgpu_kernel void @test_call_external_v3i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1690,7 +1690,7 @@ define amdgpu_kernel void @test_call_external_v4i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1752,7 +1752,7 @@ define amdgpu_kernel void @test_call_external_v5i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1815,7 +1815,7 @@ define amdgpu_kernel void @test_call_external_v8i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1881,7 +1881,7 @@ define amdgpu_kernel void @test_call_external_v16i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1955,7 +1955,7 @@ define amdgpu_kernel void @test_call_external_v32i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2045,7 +2045,7 @@ define amdgpu_kernel void @test_call_external_v2i16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2103,7 +2103,7 @@ define amdgpu_kernel void @test_call_external_v3i16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2165,7 +2165,7 @@ define amdgpu_kernel void @test_call_external_v4i16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2225,7 +2225,7 @@ define amdgpu_kernel void @test_call_external_v2f16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2283,7 +2283,7 @@ define amdgpu_kernel void @test_call_external_v3f16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2345,7 +2345,7 @@ define amdgpu_kernel void @test_call_external_v4f16_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2405,7 +2405,7 @@ define amdgpu_kernel void @test_call_external_v3f32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2466,7 +2466,7 @@ define amdgpu_kernel void @test_call_external_v5f32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2530,7 +2530,7 @@ define amdgpu_kernel void @test_call_external_i32_i64_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2620,7 +2620,7 @@ define amdgpu_kernel void @test_call_external_a2i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2683,7 +2683,7 @@ define amdgpu_kernel void @test_call_external_a5i8_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2769,7 +2769,7 @@ define amdgpu_kernel void @test_call_external_v32i32_i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2800,7 +2800,7 @@ define amdgpu_kernel void @test_call_external_v32i32_i32_func_void() #0 {
; GCN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
; GCN-NEXT: [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[FRAME_INDEX]](p5) :: (load (<32 x s32>) from %stack.0, addrspace 5)
; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from %stack.0, align 128, addrspace 5)
; GCN-NEXT: G_STORE [[LOAD]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `ptr addrspace(1) poison`, align 8, addrspace 1)
; GCN-NEXT: G_STORE [[LOAD1]](s32), [[DEF]](p1) :: (volatile store (s32) into `ptr addrspace(1) poison`, addrspace 1)
@@ -2836,7 +2836,7 @@ define amdgpu_kernel void @test_call_external_i32_v32i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2867,7 +2867,7 @@ define amdgpu_kernel void @test_call_external_i32_v32i32_func_void() #0 {
; GCN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (load (s32) from %stack.0, align 128, addrspace 5)
; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD1]](p5) :: (load (<32 x s32>) from %stack.0, addrspace 5)
; GCN-NEXT: G_STORE [[LOAD]](s32), [[DEF]](p1) :: (volatile store (s32) into `ptr addrspace(1) poison`, addrspace 1)
; GCN-NEXT: G_STORE [[LOAD1]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `ptr addrspace(1) poison`, align 8, addrspace 1)
@@ -2903,7 +2903,7 @@ define amdgpu_kernel void @test_call_external_v33i32_func_void() #0 {
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2959,8 +2959,8 @@ define amdgpu_kernel void @test_call_external_v33i32_func_v33i32_i32(ptr addrspa
; GCN-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
; GCN-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load (p1) from %ir.p.kernarg.offset1, align 16, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GCN-NEXT: %18:_(p4) = nuw nusw G_PTR_ADD [[INT]], [[C]](s64)
- ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %18(p4) :: (dereferenceable invariant load (s32) from %ir.idx.kernarg.offset, align 8, addrspace 4)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw nusw inbounds G_PTR_ADD [[INT]], [[C]](s64)
+ ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32) from %ir.idx.kernarg.offset, align 8, addrspace 4)
; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_v33i32_func_v33i32_i32
@@ -2968,7 +2968,7 @@ define amdgpu_kernel void @test_call_external_v33i32_func_v33i32_i32(ptr addrspa
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2992,7 +2992,7 @@ define amdgpu_kernel void @test_call_external_v33i32_func_v33i32_i32(ptr addrspa
; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
- ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+ ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
; GCN-NEXT: $sgpr12 = COPY [[COPY14]](s32)
; GCN-NEXT: $sgpr13 = COPY [[COPY15]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll
index b5a87ab..070d35a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll
@@ -25,16 +25,16 @@ define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval
; GCN-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.1.out.val
; GCN-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GCN-NEXT: %18:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
; GCN-NEXT: G_STORE [[C]](s8), [[FRAME_INDEX]](p5) :: (store (s8) into %ir.in.val, addrspace 5)
- ; GCN-NEXT: G_STORE [[C1]](s32), %18(p5) :: (store (s32) into %ir.in.gep1, addrspace 5)
+ ; GCN-NEXT: G_STORE [[C1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.in.gep1, addrspace 5)
; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; GCN-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32
; GCN-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
; GCN-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; GCN-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; GCN-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; GCN-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; GCN-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; GCN-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -51,15 +51,15 @@ define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval
; GCN-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; GCN-NEXT: [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sp_reg
; GCN-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
; GCN-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GCN-NEXT: G_MEMCPY [[PTR_ADD1]](p5), [[FRAME_INDEX]](p5), [[C7]](s32), 0 :: (dereferenceable store (s64) into stack, align 4, addrspace 5), (dereferenceable load (s64) from %ir.in.val, align 4, addrspace 5)
+ ; GCN-NEXT: G_MEMCPY [[PTR_ADD2]](p5), [[FRAME_INDEX]](p5), [[C7]](s32), 0 :: (dereferenceable store (s64) into stack, align 4, addrspace 5), (dereferenceable load (s64) from %ir.in.val, align 4, addrspace 5)
; GCN-NEXT: $vgpr0 = COPY [[FRAME_INDEX1]](p5)
; GCN-NEXT: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
; GCN-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
; GCN-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
; GCN-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
- ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+ ; GCN-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
; GCN-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
; GCN-NEXT: $sgpr12 = COPY [[COPY14]](s32)
; GCN-NEXT: $sgpr13 = COPY [[COPY15]](s32)
@@ -68,9 +68,9 @@ define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval
; GCN-NEXT: $vgpr31 = COPY [[OR1]](s32)
; GCN-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[GV]](p0), @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32, csr_amdgpu, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
; GCN-NEXT: ADJCALLSTACKDOWN 0, 8, implicit-def $scc
- ; GCN-NEXT: %46:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX1]], [[C2]](s32)
+ ; GCN-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX1]], [[C2]](s32)
; GCN-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p5) :: (dereferenceable load (s8) from %ir.out.val, addrspace 5)
- ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %46(p5) :: (dereferenceable load (s32) from %ir.out.gep1, addrspace 5)
+ ; GCN-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (dereferenceable load (s32) from %ir.out.gep1, addrspace 5)
; GCN-NEXT: G_STORE [[LOAD]](s8), [[DEF]](p1) :: (volatile store (s8) into `ptr addrspace(1) poison`, addrspace 1)
; GCN-NEXT: G_STORE [[LOAD1]](s32), [[DEF]](p1) :: (volatile store (s32) into `ptr addrspace(1) poison`, addrspace 1)
; GCN-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
index 1af175a..4e70c15 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
@@ -133,7 +133,7 @@ define amdgpu_kernel void @test_call_external_void_func_void() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -245,7 +245,7 @@ define amdgpu_kernel void @test_call_external_void_func_empty_struct() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -301,7 +301,7 @@ define amdgpu_kernel void @test_call_external_void_func_empty_array() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -357,7 +357,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -416,7 +416,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -476,7 +476,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -535,7 +535,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -595,7 +595,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -656,7 +656,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -715,7 +715,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -774,7 +774,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -834,7 +834,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -893,7 +893,7 @@ define amdgpu_kernel void @test_call_external_void_func_i32_imm(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -988,7 +988,7 @@ define amdgpu_kernel void @test_call_external_void_func_i64_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1047,7 +1047,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i64() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1110,7 +1110,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i64_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C2]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1172,7 +1172,7 @@ define amdgpu_kernel void @test_call_external_void_func_i48(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1234,7 +1234,7 @@ define amdgpu_kernel void @test_call_external_void_func_i48_signext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1296,7 +1296,7 @@ define amdgpu_kernel void @test_call_external_void_func_i48_zeroext(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1357,7 +1357,7 @@ define amdgpu_kernel void @test_call_external_void_func_p0_imm(ptr %arg) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1416,7 +1416,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2p0() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1482,7 +1482,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i64() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C2]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1552,7 +1552,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i64() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1618,7 +1618,7 @@ define amdgpu_kernel void @test_call_external_void_func_f16_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1675,7 +1675,7 @@ define amdgpu_kernel void @test_call_external_void_func_f32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1733,7 +1733,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2f32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C2]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1794,7 +1794,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3f32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1858,7 +1858,7 @@ define amdgpu_kernel void @test_call_external_void_func_v5f32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C5]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1919,7 +1919,7 @@ define amdgpu_kernel void @test_call_external_void_func_f64_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -1979,7 +1979,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2f64_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C2]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2042,7 +2042,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3f64_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2105,7 +2105,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2163,7 +2163,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2226,7 +2226,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3f16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2289,7 +2289,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2352,7 +2352,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i16_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C4]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2411,7 +2411,7 @@ define amdgpu_kernel void @test_call_external_void_func_v5i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2475,7 +2475,7 @@ define amdgpu_kernel void @test_call_external_void_func_v7i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2540,7 +2540,7 @@ define amdgpu_kernel void @test_call_external_void_func_v63i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2636,7 +2636,7 @@ define amdgpu_kernel void @test_call_external_void_func_v65i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2735,7 +2735,7 @@ define amdgpu_kernel void @test_call_external_void_func_v66i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2831,7 +2831,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2f16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2889,7 +2889,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -2950,7 +2950,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C2]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3012,7 +3012,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i32_imm(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3076,7 +3076,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i32_i32(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C4]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3137,7 +3137,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3202,7 +3202,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C4]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3267,7 +3267,7 @@ define amdgpu_kernel void @test_call_external_void_func_v5i32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C5]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3330,7 +3330,7 @@ define amdgpu_kernel void @test_call_external_void_func_v8i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3404,7 +3404,7 @@ define amdgpu_kernel void @test_call_external_void_func_v8i32_imm() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C8]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3470,7 +3470,7 @@ define amdgpu_kernel void @test_call_external_void_func_v16i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3546,7 +3546,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3644,7 +3644,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32_i32(i32) #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3746,7 +3746,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32_i8_i8_i16() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3858,7 +3858,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32_p3_p5() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -3956,7 +3956,7 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: ("amdgpu-noclobber" load (s8) from %ir.ptr0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: ("amdgpu-noclobber" load (s32) from %ir.ptr0 + 4, addrspace 1)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_struct_i8_i32
@@ -3964,7 +3964,7 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4010,7 +4010,7 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32
@@ -4036,7 +4036,7 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (invariant load (p1) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr0 + 4, addrspace 1)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_struct_i8_i32_inreg
@@ -4076,16 +4076,16 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.val
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: %15:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
; CHECK-NEXT: G_STORE [[C]](s8), [[FRAME_INDEX]](p5) :: (store (s8) into %ir.val, addrspace 5)
- ; CHECK-NEXT: G_STORE [[C1]](s32), %15(p5) :: (store (s32) into %ir.gep1, addrspace 5)
+ ; CHECK-NEXT: G_STORE [[C1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_byval_struct_i8_i32
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C3]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4102,14 +4102,14 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK-NEXT: [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sp_reg
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: G_MEMCPY [[PTR_ADD1]](p5), [[FRAME_INDEX]](p5), [[C7]](s32), 0 :: (dereferenceable store (s64) into stack, align 4, addrspace 5), (dereferenceable load (s64) from %ir.val, align 4, addrspace 5)
+ ; CHECK-NEXT: G_MEMCPY [[PTR_ADD2]](p5), [[FRAME_INDEX]](p5), [[C7]](s32), 0 :: (dereferenceable store (s64) into stack, align 4, addrspace 5), (dereferenceable load (s64) from %ir.val, align 4, addrspace 5)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
- ; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+ ; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY15]](s32)
@@ -4263,7 +4263,7 @@ define amdgpu_kernel void @test_call_external_void_func_v2i8() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4329,7 +4329,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i8() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4398,7 +4398,7 @@ define amdgpu_kernel void @test_call_external_void_func_v4i8() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4470,7 +4470,7 @@ define amdgpu_kernel void @test_call_external_void_func_v8i8() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4554,7 +4554,7 @@ define amdgpu_kernel void @test_call_external_void_func_v16i8() #0 {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4656,15 +4656,15 @@ define amdgpu_kernel void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val
; CHECK-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load (<32 x s32>) from %ir.val.kernarg.offset1, align 16, addrspace 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: %18:_(p4) = nuw nusw G_PTR_ADD [[INT]], [[C]](s64)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %18(p4) :: (dereferenceable invariant load (s64) from %ir.tmp.kernarg.offset, align 16, addrspace 4)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw nusw inbounds G_PTR_ADD [[INT]], [[C]](s64)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s64) from %ir.tmp.kernarg.offset, align 16, addrspace 4)
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @stack_passed_f64_arg
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C1]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
@@ -4682,15 +4682,15 @@ define amdgpu_kernel void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<32 x s32>)
; CHECK-NEXT: [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sp_reg
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C4]](s32)
- ; CHECK-NEXT: G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store (s32) into stack, align 16, addrspace 5)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C4]](s32)
+ ; CHECK-NEXT: G_STORE [[UV31]](s32), [[PTR_ADD2]](p5) :: (store (s32) into stack, align 16, addrspace 5)
; CHECK-NEXT: [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C5]](s32)
- ; CHECK-NEXT: G_STORE [[UV32]](s32), [[PTR_ADD2]](p5) :: (store (s32) into stack + 4, addrspace 5)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C5]](s32)
+ ; CHECK-NEXT: G_STORE [[UV32]](s32), [[PTR_ADD3]](p5) :: (store (s32) into stack + 4, addrspace 5)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
- ; CHECK-NEXT: G_STORE [[UV33]](s32), [[PTR_ADD3]](p5) :: (store (s32) into stack + 8, align 8, addrspace 5)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C6]](s32)
+ ; CHECK-NEXT: G_STORE [[UV33]](s32), [[PTR_ADD4]](p5) :: (store (s32) into stack + 8, align 8, addrspace 5)
; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
; CHECK-NEXT: $vgpr2 = COPY [[UV2]](s32)
@@ -4726,7 +4726,7 @@ define amdgpu_kernel void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val
; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
- ; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+ ; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
; CHECK-NEXT: $sgpr12 = COPY [[COPY14]](s32)
; CHECK-NEXT: $sgpr13 = COPY [[COPY15]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
index d80f332..644ef05 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
@@ -97,8 +97,8 @@ define void @i1_arg_i1_use(i1 %arg) #0 {
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[TRUNC]], [[C]]
- ; CHECK-NEXT: [[INTRINSIC_W_SIDE_EFFECTS:%[0-9]+]]:_(s1), [[INTRINSIC_W_SIDE_EFFECTS1:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if), [[XOR]](s1)
- ; CHECK-NEXT: G_BRCOND [[INTRINSIC_W_SIDE_EFFECTS]](s1), %bb.2
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s1), [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if), [[XOR]](s1)
+ ; CHECK-NEXT: G_BRCOND [[INT]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb1:
@@ -108,7 +108,7 @@ define void @i1_arg_i1_use(i1 %arg) #0 {
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.bb2:
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INTRINSIC_W_SIDE_EFFECTS1]](s64)
+ ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s64)
; CHECK-NEXT: SI_RETURN
bb:
br i1 %arg, label %bb2, label %bb1
@@ -1646,7 +1646,7 @@ define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: G_STORE [[TRUNC1]](s8), [[DEF]](p1) :: (store (s8) into `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p1) :: (store (s32) into `ptr addrspace(1) poison` + 4, addrspace 1)
; CHECK-NEXT: SI_RETURN
store { i8, i32 } %arg0, ptr addrspace(1) poison
@@ -1661,11 +1661,11 @@ define void @void_func_byval_struct_i8_i32(ptr addrspace(5) byval({ i8, i32 }) %
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p5) :: (load (s8) from %ir.arg0, align 4, addrspace 5)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from %ir.arg0 + 4, addrspace 5)
; CHECK-NEXT: G_STORE [[LOAD]](s8), [[DEF]](p1) :: (store (s8) into `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[PTR_ADD1]](p1) :: (store (s32) into `ptr addrspace(1) poison` + 4, addrspace 1)
; CHECK-NEXT: SI_RETURN
%arg0.load = load { i8, i32 }, ptr addrspace(5) %arg0
@@ -1687,17 +1687,17 @@ define void @void_func_byval_struct_i8_i32_x2(ptr addrspace(5) byval({ i8, i32 }
; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p5) :: (volatile load (s8) from %ir.arg0, align 4, addrspace 5)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (volatile load (s32) from %ir.arg0 + 4, addrspace 5)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p5) :: (volatile load (s8) from %ir.arg1, align 4, addrspace 5)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s32)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (volatile load (s32) from %ir.arg1 + 4, addrspace 5)
; CHECK-NEXT: G_STORE [[LOAD]](s8), [[DEF]](p1) :: (volatile store (s8) into `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD1]](s32), [[PTR_ADD2]](p1) :: (volatile store (s32) into `ptr addrspace(1) poison` + 4, addrspace 1)
; CHECK-NEXT: G_STORE [[LOAD2]](s8), [[DEF]](p1) :: (volatile store (s8) into `ptr addrspace(1) poison`, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[LOAD3]](s32), [[PTR_ADD3]](p1) :: (volatile store (s32) into `ptr addrspace(1) poison` + 4, addrspace 1)
; CHECK-NEXT: G_STORE [[COPY2]](s32), [[DEF1]](p3) :: (volatile store (s32) into `ptr addrspace(3) poison`, addrspace 3)
; CHECK-NEXT: SI_RETURN
@@ -1760,10 +1760,10 @@ define void @byval_a3i32_align128_byval_i16_align64(ptr addrspace(5) byval([3 x
; CHECK-NEXT: [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (dereferenceable load (s32) from %ir.arg0, addrspace 5)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (dereferenceable load (s32) from %ir.arg0 + 4, addrspace 5)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (dereferenceable load (s32) from %ir.arg0 + 8, addrspace 5)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s16) = G_LOAD [[COPY1]](p5) :: (dereferenceable load (s16) from %ir.arg1, addrspace 5)
; CHECK-NEXT: G_STORE [[LOAD]](s32), [[C]](p1) :: (store (s32) into `ptr addrspace(1) null`, addrspace 1)
@@ -2770,7 +2770,7 @@ define void @vector_ptr_in_struct_arg({ <2 x ptr addrspace(1)>, <2 x ptr addrspa
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<2 x p3>), [[PTR_ADD]](p1) :: (store (<2 x p3>) into `ptr addrspace(1) poison` + 16, align 16, addrspace 1)
; CHECK-NEXT: SI_RETURN
store { <2 x ptr addrspace(1)>, <2 x ptr addrspace(3)> } %arg, ptr addrspace(1) poison
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
index 7faa43a..af9bcc4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
@@ -23,7 +23,7 @@ define amdgpu_kernel void @test_indirect_call_sgpr_ptr(ptr %fptr) {
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY12]], [[C]](s64)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-invariant.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-invariant.ll
index 3e44f33..b34d56b4f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-invariant.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-invariant.ll
@@ -43,7 +43,7 @@ define { i32, i64 } @load_const_struct_gv() {
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p1) = G_GLOBAL_VALUE @const_struct_gv
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p1) :: (dereferenceable invariant load (s32) from @const_struct_gv, align 8, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[GV]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[GV]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (dereferenceable invariant load (s64) from @const_struct_gv + 8, addrspace 1)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
index 72c176d..97c3e90 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
@@ -26,8 +26,8 @@ define fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %arg0, i32 %arg1) #1 {
; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %4:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %4(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; GCN-NEXT: $vgpr0 = COPY [[ADD]](s32)
; GCN-NEXT: SI_RETURN implicit $vgpr0
@@ -68,8 +68,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_stack_object(i32 %a, i32 %b,
; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %5:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %5(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
@@ -95,8 +95,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_callee_stack_object(i32 %a, i
; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; GCN-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %5:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %5(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_stack_object
; GCN-NEXT: $vgpr0 = COPY [[COPY]](s32)
; GCN-NEXT: $vgpr1 = COPY [[COPY1]](s32)
@@ -451,8 +451,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32_stack_object(i32 %a, i
; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %39:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %39(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; GCN-NEXT: G_STORE [[LOAD]](s32), [[FRAME_INDEX4]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
@@ -646,8 +646,8 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32(i32 %a, i3
; GCN-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; GCN-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %39:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %39(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
; GCN-NEXT: [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; GCN-NEXT: G_STORE [[LOAD]](s32), [[FRAME_INDEX4]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
@@ -751,8 +751,8 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg
; GCN-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GCN-NEXT: [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GCN-NEXT: %47:_(p5) = nuw nusw G_PTR_ADD [[FRAME_INDEX7]], [[C2]](s32)
- ; GCN-NEXT: G_STORE [[C]](s32), %47(p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX7]], [[C2]](s32)
+ ; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
; GCN-NEXT: [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
; GCN-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX8]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
@@ -923,13 +923,13 @@ define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
; GCN-NEXT: [[FRAME_INDEX35:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.1.alloca1
; GCN-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX34]](p5) :: (store (s32) into %ir.alloca0, addrspace 5)
; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.alloca0 + 4, addrspace 5)
; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into %ir.alloca0 + 8, addrspace 5)
; GCN-NEXT: G_STORE [[C1]](s64), [[FRAME_INDEX35]](p5) :: (store (s64) into %ir.alloca1, addrspace 5)
- ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX35]], [[C3]](s32)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX35]], [[C3]](s32)
; GCN-NEXT: G_STORE [[C1]](s64), [[PTR_ADD2]](p5) :: (store (s64) into %ir.alloca1 + 8, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @void_fastcc_multi_byval
; GCN-NEXT: [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
@@ -1090,10 +1090,10 @@ define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64
; GCN-NEXT: [[FRAME_INDEX34:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
; GCN-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX34]](p5) :: (store (s32) into %ir.alloca, addrspace 5)
; GCN-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX34]], [[C2]](s32)
; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.alloca + 4, addrspace 5)
; GCN-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
; GCN-NEXT: G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into %ir.alloca + 8, addrspace 5)
; GCN-NEXT: [[GV:%[0-9]+]]:ccr_sgpr_64(p0) = G_GLOBAL_VALUE @void_fastcc_byval_and_stack_passed
; GCN-NEXT: [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
index 0b3b428..6a4522f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
@@ -143,7 +143,7 @@ body: |
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY $vgpr0
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68
- ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C]](s64)
+ ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4)
; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
@@ -152,6 +152,7 @@ body: |
; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY1]](p5), [[C1]]
; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]]
; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](p0)
+ ;
; GFX9-LABEL: name: test_addrspacecast_p5_to_p0
; GFX9: liveins: $vgpr0
; GFX9-NEXT: {{ $}}
@@ -211,7 +212,7 @@ body: |
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr0
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C]](s64)
+ ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
@@ -220,6 +221,7 @@ body: |
; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY1]](p3), [[C1]]
; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]]
; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](p0)
+ ;
; GFX9-LABEL: name: test_addrspacecast_p3_to_p0
; GFX9: liveins: $vgpr0
; GFX9-NEXT: {{ $}}
@@ -354,7 +356,7 @@ body: |
; SIVI-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY1]](<2 x p3>)
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C]](s64)
+ ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
@@ -363,7 +365,7 @@ body: |
; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C1]]
; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]]
; SIVI-NEXT: [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
- ; SIVI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY3]], [[C]](s64)
+ ; SIVI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY3]], [[C]](s64)
; SIVI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
; SIVI-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[LOAD1]](s32)
@@ -371,6 +373,7 @@ body: |
; SIVI-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C2]]
; SIVI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[SELECT]](p0), [[SELECT1]](p0)
; SIVI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x p0>)
+ ;
; GFX9-LABEL: name: test_addrspacecast_v2p3_to_v2p0
; GFX9: liveins: $vgpr0_vgpr1
; GFX9-NEXT: {{ $}}
@@ -505,11 +508,12 @@ body: |
; SIVI-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68
- ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4)
; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0)
+ ;
; GFX9-LABEL: name: test_addrspacecast_p5_fi_to_p0
; GFX9: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
index 9315533..724d581 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir
@@ -1068,7 +1068,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
@@ -1115,7 +1115,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD]](<16 x s32>)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3), [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3), [[UV6:%[0-9]+]]:_(p3), [[UV7:%[0-9]+]]:_(p3), [[UV8:%[0-9]+]]:_(p3), [[UV9:%[0-9]+]]:_(p3), [[UV10:%[0-9]+]]:_(p3), [[UV11:%[0-9]+]]:_(p3), [[UV12:%[0-9]+]]:_(p3), [[UV13:%[0-9]+]]:_(p3), [[UV14:%[0-9]+]]:_(p3), [[UV15:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST]](<16 x p3>)
@@ -1142,13 +1142,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
@@ -1157,193 +1157,193 @@ body: |
; CHECK-NEXT: [[UV48:%[0-9]+]]:_(s32), [[UV49:%[0-9]+]]:_(s32), [[UV50:%[0-9]+]]:_(s32), [[UV51:%[0-9]+]]:_(s32), [[UV52:%[0-9]+]]:_(s32), [[UV53:%[0-9]+]]:_(s32), [[UV54:%[0-9]+]]:_(s32), [[UV55:%[0-9]+]]:_(s32), [[UV56:%[0-9]+]]:_(s32), [[UV57:%[0-9]+]]:_(s32), [[UV58:%[0-9]+]]:_(s32), [[UV59:%[0-9]+]]:_(s32), [[UV60:%[0-9]+]]:_(s32), [[UV61:%[0-9]+]]:_(s32), [[UV62:%[0-9]+]]:_(s32), [[UV63:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD3]](<16 x s32>)
; CHECK-NEXT: G_STORE [[UV]](s32), [[FRAME_INDEX]](p5) :: (store (s32) into %stack.0, align 256, addrspace 5)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; CHECK-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD3]](p5) :: (store (s32) into %stack.0 + 4, basealign 256, addrspace 5)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD4]](p5) :: (store (s32) into %stack.0 + 8, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD5]](p5) :: (store (s32) into %stack.0 + 12, basealign 256, addrspace 5)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD6]](p5) :: (store (s32) into %stack.0 + 16, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK-NEXT: G_STORE [[UV5]](s32), [[PTR_ADD7]](p5) :: (store (s32) into %stack.0 + 20, basealign 256, addrspace 5)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK-NEXT: G_STORE [[UV6]](s32), [[PTR_ADD8]](p5) :: (store (s32) into %stack.0 + 24, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK-NEXT: G_STORE [[UV7]](s32), [[PTR_ADD9]](p5) :: (store (s32) into %stack.0 + 28, basealign 256, addrspace 5)
; CHECK-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD10]](p5) :: (store (s32) into %stack.0 + 32, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK-NEXT: G_STORE [[UV9]](s32), [[PTR_ADD11]](p5) :: (store (s32) into %stack.0 + 36, basealign 256, addrspace 5)
; CHECK-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK-NEXT: G_STORE [[UV10]](s32), [[PTR_ADD12]](p5) :: (store (s32) into %stack.0 + 40, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK-NEXT: G_STORE [[UV11]](s32), [[PTR_ADD13]](p5) :: (store (s32) into %stack.0 + 44, basealign 256, addrspace 5)
; CHECK-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK-NEXT: G_STORE [[UV12]](s32), [[PTR_ADD14]](p5) :: (store (s32) into %stack.0 + 48, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK-NEXT: G_STORE [[UV13]](s32), [[PTR_ADD15]](p5) :: (store (s32) into %stack.0 + 52, basealign 256, addrspace 5)
; CHECK-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
+ ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK-NEXT: G_STORE [[UV14]](s32), [[PTR_ADD16]](p5) :: (store (s32) into %stack.0 + 56, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
+ ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK-NEXT: G_STORE [[UV15]](s32), [[PTR_ADD17]](p5) :: (store (s32) into %stack.0 + 60, basealign 256, addrspace 5)
; CHECK-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
- ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
+ ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK-NEXT: G_STORE [[UV16]](s32), [[PTR_ADD18]](p5) :: (store (s32) into %stack.0 + 64, align 64, basealign 256, addrspace 5)
; CHECK-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
- ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
+ ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK-NEXT: G_STORE [[UV17]](s32), [[PTR_ADD19]](p5) :: (store (s32) into %stack.0 + 68, basealign 256, addrspace 5)
; CHECK-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
- ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
+ ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK-NEXT: G_STORE [[UV18]](s32), [[PTR_ADD20]](p5) :: (store (s32) into %stack.0 + 72, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
- ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
+ ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK-NEXT: G_STORE [[UV19]](s32), [[PTR_ADD21]](p5) :: (store (s32) into %stack.0 + 76, basealign 256, addrspace 5)
; CHECK-NEXT: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
- ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
+ ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK-NEXT: G_STORE [[UV20]](s32), [[PTR_ADD22]](p5) :: (store (s32) into %stack.0 + 80, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
- ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
+ ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK-NEXT: G_STORE [[UV21]](s32), [[PTR_ADD23]](p5) :: (store (s32) into %stack.0 + 84, basealign 256, addrspace 5)
; CHECK-NEXT: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
- ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
+ ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK-NEXT: G_STORE [[UV22]](s32), [[PTR_ADD24]](p5) :: (store (s32) into %stack.0 + 88, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
- ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
+ ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK-NEXT: G_STORE [[UV23]](s32), [[PTR_ADD25]](p5) :: (store (s32) into %stack.0 + 92, basealign 256, addrspace 5)
; CHECK-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
- ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
+ ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK-NEXT: G_STORE [[UV24]](s32), [[PTR_ADD26]](p5) :: (store (s32) into %stack.0 + 96, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
- ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
+ ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK-NEXT: G_STORE [[UV25]](s32), [[PTR_ADD27]](p5) :: (store (s32) into %stack.0 + 100, basealign 256, addrspace 5)
; CHECK-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
- ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
+ ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK-NEXT: G_STORE [[UV26]](s32), [[PTR_ADD28]](p5) :: (store (s32) into %stack.0 + 104, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
- ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
+ ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK-NEXT: G_STORE [[UV27]](s32), [[PTR_ADD29]](p5) :: (store (s32) into %stack.0 + 108, basealign 256, addrspace 5)
; CHECK-NEXT: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
- ; CHECK-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
+ ; CHECK-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK-NEXT: G_STORE [[UV28]](s32), [[PTR_ADD30]](p5) :: (store (s32) into %stack.0 + 112, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
- ; CHECK-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
+ ; CHECK-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK-NEXT: G_STORE [[UV29]](s32), [[PTR_ADD31]](p5) :: (store (s32) into %stack.0 + 116, basealign 256, addrspace 5)
; CHECK-NEXT: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
- ; CHECK-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
+ ; CHECK-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK-NEXT: G_STORE [[UV30]](s32), [[PTR_ADD32]](p5) :: (store (s32) into %stack.0 + 120, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
- ; CHECK-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
+ ; CHECK-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK-NEXT: G_STORE [[UV31]](s32), [[PTR_ADD33]](p5) :: (store (s32) into %stack.0 + 124, basealign 256, addrspace 5)
; CHECK-NEXT: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; CHECK-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
+ ; CHECK-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK-NEXT: G_STORE [[UV32]](s32), [[PTR_ADD34]](p5) :: (store (s32) into %stack.0 + 128, align 128, basealign 256, addrspace 5)
; CHECK-NEXT: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
- ; CHECK-NEXT: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
+ ; CHECK-NEXT: [[PTR_ADD35:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK-NEXT: G_STORE [[UV33]](s32), [[PTR_ADD35]](p5) :: (store (s32) into %stack.0 + 132, basealign 256, addrspace 5)
; CHECK-NEXT: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
- ; CHECK-NEXT: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
+ ; CHECK-NEXT: [[PTR_ADD36:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK-NEXT: G_STORE [[UV34]](s32), [[PTR_ADD36]](p5) :: (store (s32) into %stack.0 + 136, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
- ; CHECK-NEXT: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
+ ; CHECK-NEXT: [[PTR_ADD37:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK-NEXT: G_STORE [[UV35]](s32), [[PTR_ADD37]](p5) :: (store (s32) into %stack.0 + 140, basealign 256, addrspace 5)
; CHECK-NEXT: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
- ; CHECK-NEXT: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
+ ; CHECK-NEXT: [[PTR_ADD38:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK-NEXT: G_STORE [[UV36]](s32), [[PTR_ADD38]](p5) :: (store (s32) into %stack.0 + 144, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
- ; CHECK-NEXT: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
+ ; CHECK-NEXT: [[PTR_ADD39:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK-NEXT: G_STORE [[UV37]](s32), [[PTR_ADD39]](p5) :: (store (s32) into %stack.0 + 148, basealign 256, addrspace 5)
; CHECK-NEXT: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
- ; CHECK-NEXT: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
+ ; CHECK-NEXT: [[PTR_ADD40:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK-NEXT: G_STORE [[UV38]](s32), [[PTR_ADD40]](p5) :: (store (s32) into %stack.0 + 152, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
- ; CHECK-NEXT: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
+ ; CHECK-NEXT: [[PTR_ADD41:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK-NEXT: G_STORE [[UV39]](s32), [[PTR_ADD41]](p5) :: (store (s32) into %stack.0 + 156, basealign 256, addrspace 5)
; CHECK-NEXT: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
- ; CHECK-NEXT: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
+ ; CHECK-NEXT: [[PTR_ADD42:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK-NEXT: G_STORE [[UV40]](s32), [[PTR_ADD42]](p5) :: (store (s32) into %stack.0 + 160, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
- ; CHECK-NEXT: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
+ ; CHECK-NEXT: [[PTR_ADD43:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK-NEXT: G_STORE [[UV41]](s32), [[PTR_ADD43]](p5) :: (store (s32) into %stack.0 + 164, basealign 256, addrspace 5)
; CHECK-NEXT: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
- ; CHECK-NEXT: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
+ ; CHECK-NEXT: [[PTR_ADD44:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK-NEXT: G_STORE [[UV42]](s32), [[PTR_ADD44]](p5) :: (store (s32) into %stack.0 + 168, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
- ; CHECK-NEXT: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
+ ; CHECK-NEXT: [[PTR_ADD45:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK-NEXT: G_STORE [[UV43]](s32), [[PTR_ADD45]](p5) :: (store (s32) into %stack.0 + 172, basealign 256, addrspace 5)
; CHECK-NEXT: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
- ; CHECK-NEXT: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
+ ; CHECK-NEXT: [[PTR_ADD46:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK-NEXT: G_STORE [[UV44]](s32), [[PTR_ADD46]](p5) :: (store (s32) into %stack.0 + 176, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
- ; CHECK-NEXT: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
+ ; CHECK-NEXT: [[PTR_ADD47:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK-NEXT: G_STORE [[UV45]](s32), [[PTR_ADD47]](p5) :: (store (s32) into %stack.0 + 180, basealign 256, addrspace 5)
; CHECK-NEXT: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
- ; CHECK-NEXT: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
+ ; CHECK-NEXT: [[PTR_ADD48:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK-NEXT: G_STORE [[UV46]](s32), [[PTR_ADD48]](p5) :: (store (s32) into %stack.0 + 184, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
- ; CHECK-NEXT: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
+ ; CHECK-NEXT: [[PTR_ADD49:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK-NEXT: G_STORE [[UV47]](s32), [[PTR_ADD49]](p5) :: (store (s32) into %stack.0 + 188, basealign 256, addrspace 5)
; CHECK-NEXT: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
- ; CHECK-NEXT: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
+ ; CHECK-NEXT: [[PTR_ADD50:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK-NEXT: G_STORE [[UV48]](s32), [[PTR_ADD50]](p5) :: (store (s32) into %stack.0 + 192, align 64, basealign 256, addrspace 5)
; CHECK-NEXT: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
- ; CHECK-NEXT: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
+ ; CHECK-NEXT: [[PTR_ADD51:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK-NEXT: G_STORE [[UV49]](s32), [[PTR_ADD51]](p5) :: (store (s32) into %stack.0 + 196, basealign 256, addrspace 5)
; CHECK-NEXT: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
- ; CHECK-NEXT: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
+ ; CHECK-NEXT: [[PTR_ADD52:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK-NEXT: G_STORE [[UV50]](s32), [[PTR_ADD52]](p5) :: (store (s32) into %stack.0 + 200, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
- ; CHECK-NEXT: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
+ ; CHECK-NEXT: [[PTR_ADD53:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK-NEXT: G_STORE [[UV51]](s32), [[PTR_ADD53]](p5) :: (store (s32) into %stack.0 + 204, basealign 256, addrspace 5)
; CHECK-NEXT: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
- ; CHECK-NEXT: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
+ ; CHECK-NEXT: [[PTR_ADD54:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK-NEXT: G_STORE [[UV52]](s32), [[PTR_ADD54]](p5) :: (store (s32) into %stack.0 + 208, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
- ; CHECK-NEXT: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
+ ; CHECK-NEXT: [[PTR_ADD55:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK-NEXT: G_STORE [[UV53]](s32), [[PTR_ADD55]](p5) :: (store (s32) into %stack.0 + 212, basealign 256, addrspace 5)
; CHECK-NEXT: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
- ; CHECK-NEXT: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
+ ; CHECK-NEXT: [[PTR_ADD56:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK-NEXT: G_STORE [[UV54]](s32), [[PTR_ADD56]](p5) :: (store (s32) into %stack.0 + 216, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
- ; CHECK-NEXT: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
+ ; CHECK-NEXT: [[PTR_ADD57:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK-NEXT: G_STORE [[UV55]](s32), [[PTR_ADD57]](p5) :: (store (s32) into %stack.0 + 220, basealign 256, addrspace 5)
; CHECK-NEXT: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
- ; CHECK-NEXT: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
+ ; CHECK-NEXT: [[PTR_ADD58:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK-NEXT: G_STORE [[UV56]](s32), [[PTR_ADD58]](p5) :: (store (s32) into %stack.0 + 224, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
- ; CHECK-NEXT: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
+ ; CHECK-NEXT: [[PTR_ADD59:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK-NEXT: G_STORE [[UV57]](s32), [[PTR_ADD59]](p5) :: (store (s32) into %stack.0 + 228, basealign 256, addrspace 5)
; CHECK-NEXT: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
- ; CHECK-NEXT: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
+ ; CHECK-NEXT: [[PTR_ADD60:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK-NEXT: G_STORE [[UV58]](s32), [[PTR_ADD60]](p5) :: (store (s32) into %stack.0 + 232, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
- ; CHECK-NEXT: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
+ ; CHECK-NEXT: [[PTR_ADD61:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK-NEXT: G_STORE [[UV59]](s32), [[PTR_ADD61]](p5) :: (store (s32) into %stack.0 + 236, basealign 256, addrspace 5)
; CHECK-NEXT: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
- ; CHECK-NEXT: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
+ ; CHECK-NEXT: [[PTR_ADD62:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK-NEXT: G_STORE [[UV60]](s32), [[PTR_ADD62]](p5) :: (store (s32) into %stack.0 + 240, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
- ; CHECK-NEXT: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
+ ; CHECK-NEXT: [[PTR_ADD63:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK-NEXT: G_STORE [[UV61]](s32), [[PTR_ADD63]](p5) :: (store (s32) into %stack.0 + 244, basealign 256, addrspace 5)
; CHECK-NEXT: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
- ; CHECK-NEXT: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
+ ; CHECK-NEXT: [[PTR_ADD64:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK-NEXT: G_STORE [[UV62]](s32), [[PTR_ADD64]](p5) :: (store (s32) into %stack.0 + 248, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
- ; CHECK-NEXT: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
+ ; CHECK-NEXT: [[PTR_ADD65:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK-NEXT: G_STORE [[UV63]](s32), [[PTR_ADD65]](p5) :: (store (s32) into %stack.0 + 252, basealign 256, addrspace 5)
; CHECK-NEXT: [[C66:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C66]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
index 1f9c059..3fa73c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
@@ -2,9 +2,8 @@
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -enable-unsafe-fp-math -o - %s | FileCheck -check-prefix=GFX9-UNSAFE %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
---
name: test_fdiv_s16
@@ -99,17 +98,56 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX10-LABEL: name: test_fdiv_s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -120,6 +158,90 @@ body: |
...
---
+name: test_fdiv_s16_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s16_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[FPEXT]], [[INT]]
+ ; SI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+ ; SI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s16_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s16_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s16_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = afn G_FDIV %2, %3
+ %5:_(s32) = G_ANYEXT %4
+ $vgpr0 = COPY %5
+...
+
+---
name: test_fdiv_s32_denorms_on
machineFunctionInfo:
mode:
@@ -192,15 +314,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_on
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_on
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -220,6 +333,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -227,6 +360,70 @@ body: |
...
---
+name: test_fdiv_s32_denorms_on_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+
+---
name: test_fdiv_s32_denorms_off
machineFunctionInfo:
mode:
@@ -305,15 +502,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -335,6 +523,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -342,6 +552,69 @@ body: |
...
---
+name: test_fdiv_s32_denorms_off_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: false
+ fp32-output-denormals: false
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+---
name: test_fdiv_s32_denorms_off_arcp
machineFunctionInfo:
mode:
@@ -420,15 +693,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off_arcp
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off_arcp
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -450,6 +714,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_arcp
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = arcp G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = arcp G_FDIV %0, %1
@@ -536,23 +822,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -572,6 +841,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_FDIV %0, %1
@@ -708,20 +997,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -760,6 +1035,45 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_FDIV %0, %1
@@ -877,20 +1191,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32_flags
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32_flags
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -925,6 +1225,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32_flags
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = nnan G_FDIV %0, %1
@@ -1078,22 +1413,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV4]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV5]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[FMUL2]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v3s32
; GFX10: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
; GFX10-NEXT: {{ $}}
@@ -1140,6 +1459,53 @@ body: |
; GFX10-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s32
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; GFX11-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV3]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV4]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[INT14:%[0-9]+]]:_(s32), [[INT15:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 0
+ ; GFX11-NEXT: [[INT16:%[0-9]+]]:_(s32), [[INT17:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 1
+ ; GFX11-NEXT: [[INT18:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT14]](s32)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[INT14]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[INT18]], [[C]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FMA10]], [[INT18]], [[INT18]]
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT16]], [[FMA11]]
+ ; GFX11-NEXT: [[FMA12:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL2]], [[INT16]]
+ ; GFX11-NEXT: [[FMA13:%[0-9]+]]:_(s32) = G_FMA [[FMA12]], [[FMA11]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA14:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA13]], [[INT16]]
+ ; GFX11-NEXT: [[INT19:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA14]](s32), [[FMA11]](s32), [[FMA13]](s32), [[INT17]](s1)
+ ; GFX11-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
%2:_(<3 x s32>) = G_FDIV %0, %1
@@ -1271,35 +1637,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[UV2]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[UV]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[UV3]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[INT1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA7]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FMA8]], [[FMA7]], [[FMA7]]
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[FMA9]]
- ; GFX9-UNSAFE-NEXT: [[FMA10:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[UV1]]
- ; GFX9-UNSAFE-NEXT: [[FMA11:%[0-9]+]]:_(s64) = G_FMA [[FMA10]], [[FMA9]], [[FMUL1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FMA5]](s64), [[FMA11]](s64)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s64
; GFX10: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
; GFX10-NEXT: {{ $}}
@@ -1334,6 +1671,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s64
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT9]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA6]], [[C]]
+ ; GFX11-NEXT: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[INT10]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL1]](s64), [[INT11]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_FDIV %0, %1
@@ -1502,26 +1874,92 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC2]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v2s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC2]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC3]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV2]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV3]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_FDIV %0, %1
@@ -1756,37 +2194,133 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
; GFX9-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL2]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
- ; GFX9-UNSAFE-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ; GFX10-LABEL: name: test_fdiv_v3s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC3]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC4]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC5]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX10-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX10-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX11-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX11-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s16>) = G_IMPLICIT_DEF
%1:_(<3 x s16>) = G_IMPLICIT_DEF
%2:_(<3 x s16>) = G_FDIV %0, %1
@@ -2094,42 +2628,164 @@ body: |
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v4s16
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC6]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC7]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[INT3]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL2]](s16), [[FMUL3]](s16)
- ; GFX9-UNSAFE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v4s16
+ ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC4]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC5]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC6]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; GFX10-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX10-NEXT: [[FMUL15:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX10-NEXT: [[FMUL16:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FMUL15]]
+ ; GFX10-NEXT: [[FADD12:%[0-9]+]]:_(s32) = G_FADD [[FMUL16]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL17:%[0-9]+]]:_(s32) = G_FMUL [[FADD12]], [[INT6]]
+ ; GFX10-NEXT: [[FADD13:%[0-9]+]]:_(s32) = G_FADD [[FMUL17]], [[FMUL15]]
+ ; GFX10-NEXT: [[FMUL18:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FADD13]]
+ ; GFX10-NEXT: [[FADD14:%[0-9]+]]:_(s32) = G_FADD [[FMUL18]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL19:%[0-9]+]]:_(s32) = G_FMUL [[FADD14]], [[INT6]]
+ ; GFX10-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL19]], [[C1]]
+ ; GFX10-NEXT: [[FADD15:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FADD13]]
+ ; GFX10-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD15]](s32)
+ ; GFX10-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[TRUNC7]](s16), [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v4s16
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+ ; GFX11-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX11-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMUL6]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FMA9]], [[INT6]], [[FMUL6]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMA10]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FMA11]], [[INT6]]
+ ; GFX11-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL7]], [[C]]
+ ; GFX11-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FMA10]]
+ ; GFX11-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[UV7]](s16), [[UV3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_FDIV %0, %1
@@ -2185,15 +2841,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2202,6 +2849,15 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2261,16 +2917,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2280,6 +2926,16 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2351,13 +3007,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2376,6 +3025,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2448,14 +3116,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2475,6 +3135,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2558,22 +3238,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2592,6 +3256,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double 1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
@@ -2678,23 +3361,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2714,6 +3380,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double -1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir
index bebbf2a..1bc7cd0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir
@@ -197,82 +197,82 @@ body: |
; CHECK-NEXT: [[UV8:%[0-9]+]]:_(<4 x s32>), [[UV9:%[0-9]+]]:_(<4 x s32>), [[UV10:%[0-9]+]]:_(<4 x s32>), [[UV11:%[0-9]+]]:_(<4 x s32>), [[UV12:%[0-9]+]]:_(<4 x s32>), [[UV13:%[0-9]+]]:_(<4 x s32>), [[UV14:%[0-9]+]]:_(<4 x s32>), [[UV15:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<32 x s32>)
; CHECK-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD1]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD2]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CHECK-NEXT: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64)
; CHECK-NEXT: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
; CHECK-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s64)
; CHECK-NEXT: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
; CHECK-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s64)
; CHECK-NEXT: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
; CHECK-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s64)
; CHECK-NEXT: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
; CHECK-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C13]](s64)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s64)
; CHECK-NEXT: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
; CHECK-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C14]](s64)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s64)
; CHECK-NEXT: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
; CHECK-NEXT: [[UV16:%[0-9]+]]:_(<4 x s32>), [[UV17:%[0-9]+]]:_(<4 x s32>), [[UV18:%[0-9]+]]:_(<4 x s32>), [[UV19:%[0-9]+]]:_(<4 x s32>), [[UV20:%[0-9]+]]:_(<4 x s32>), [[UV21:%[0-9]+]]:_(<4 x s32>), [[UV22:%[0-9]+]]:_(<4 x s32>), [[UV23:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<32 x s32>)
; CHECK-NEXT: [[UV24:%[0-9]+]]:_(<4 x s32>), [[UV25:%[0-9]+]]:_(<4 x s32>), [[UV26:%[0-9]+]]:_(<4 x s32>), [[UV27:%[0-9]+]]:_(<4 x s32>), [[UV28:%[0-9]+]]:_(<4 x s32>), [[UV29:%[0-9]+]]:_(<4 x s32>), [[UV30:%[0-9]+]]:_(<4 x s32>), [[UV31:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<32 x s32>)
; CHECK-NEXT: G_STORE [[UV16]](<4 x s32>), [[COPY1]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+ ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK-NEXT: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD18]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD19]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD20]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD21]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[UV24]](<4 x s32>), [[PTR_ADD22]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C8]](s64)
; CHECK-NEXT: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD23]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64)
+ ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C9]](s64)
; CHECK-NEXT: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD24]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64)
+ ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C10]](s64)
; CHECK-NEXT: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD25]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64)
+ ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C11]](s64)
; CHECK-NEXT: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD26]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64)
+ ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C12]](s64)
; CHECK-NEXT: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD27]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64)
+ ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C13]](s64)
; CHECK-NEXT: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD28]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64)
+ ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C14]](s64)
; CHECK-NEXT: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD29]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s32) = G_CONSTANT i32 64
@@ -300,13 +300,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<16 x s32>)
@@ -318,46 +318,46 @@ body: |
; CHECK-NEXT: [[UV28:%[0-9]+]]:_(<4 x s32>), [[UV29:%[0-9]+]]:_(<4 x s32>), [[UV30:%[0-9]+]]:_(<4 x s32>), [[UV31:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
; CHECK-NEXT: G_STORE [[UV24]](<4 x s32>), [[COPY1]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK-NEXT: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK-NEXT: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK-NEXT: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C8]](s64)
; CHECK-NEXT: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C9]](s64)
; CHECK-NEXT: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
; CHECK-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C10]](s64)
; CHECK-NEXT: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
; CHECK-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C11]](s64)
; CHECK-NEXT: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
; CHECK-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C12]](s64)
; CHECK-NEXT: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
; CHECK-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C13]](s64)
; CHECK-NEXT: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
; CHECK-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
- ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64)
+ ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C14]](s64)
; CHECK-NEXT: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
; CHECK-NEXT: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
- ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C15]](s64)
+ ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C15]](s64)
; CHECK-NEXT: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s32) = G_CONSTANT i32 33
@@ -382,13 +382,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345
; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
@@ -398,255 +398,255 @@ body: |
; CHECK-NEXT: [[UV48:%[0-9]+]]:_(s32), [[UV49:%[0-9]+]]:_(s32), [[UV50:%[0-9]+]]:_(s32), [[UV51:%[0-9]+]]:_(s32), [[UV52:%[0-9]+]]:_(s32), [[UV53:%[0-9]+]]:_(s32), [[UV54:%[0-9]+]]:_(s32), [[UV55:%[0-9]+]]:_(s32), [[UV56:%[0-9]+]]:_(s32), [[UV57:%[0-9]+]]:_(s32), [[UV58:%[0-9]+]]:_(s32), [[UV59:%[0-9]+]]:_(s32), [[UV60:%[0-9]+]]:_(s32), [[UV61:%[0-9]+]]:_(s32), [[UV62:%[0-9]+]]:_(s32), [[UV63:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD3]](<16 x s32>)
; CHECK-NEXT: G_STORE [[UV]](s32), [[FRAME_INDEX]](p5) :: (store (s32) into %stack.0, align 256, addrspace 5)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p5) = COPY [[PTR_ADD3]](p5)
; CHECK-NEXT: G_STORE [[UV1]](s32), [[COPY2]](p5) :: (store (s32) into %stack.0 + 4, basealign 256, addrspace 5)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(p5) = COPY [[PTR_ADD4]](p5)
; CHECK-NEXT: G_STORE [[UV2]](s32), [[COPY3]](p5) :: (store (s32) into %stack.0 + 8, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(p5) = COPY [[PTR_ADD5]](p5)
; CHECK-NEXT: G_STORE [[UV3]](s32), [[COPY4]](p5) :: (store (s32) into %stack.0 + 12, basealign 256, addrspace 5)
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(p5) = COPY [[PTR_ADD6]](p5)
; CHECK-NEXT: G_STORE [[UV4]](s32), [[COPY5]](p5) :: (store (s32) into %stack.0 + 16, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(p5) = COPY [[PTR_ADD7]](p5)
; CHECK-NEXT: G_STORE [[UV5]](s32), [[COPY6]](p5) :: (store (s32) into %stack.0 + 20, basealign 256, addrspace 5)
; CHECK-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(p5) = COPY [[PTR_ADD8]](p5)
; CHECK-NEXT: G_STORE [[UV6]](s32), [[COPY7]](p5) :: (store (s32) into %stack.0 + 24, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(p5) = COPY [[PTR_ADD9]](p5)
; CHECK-NEXT: G_STORE [[UV7]](s32), [[COPY8]](p5) :: (store (s32) into %stack.0 + 28, basealign 256, addrspace 5)
; CHECK-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p5) = COPY [[PTR_ADD10]](p5)
; CHECK-NEXT: G_STORE [[UV8]](s32), [[COPY9]](p5) :: (store (s32) into %stack.0 + 32, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p5) = COPY [[PTR_ADD11]](p5)
; CHECK-NEXT: G_STORE [[UV9]](s32), [[COPY10]](p5) :: (store (s32) into %stack.0 + 36, basealign 256, addrspace 5)
; CHECK-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(p5) = COPY [[PTR_ADD12]](p5)
; CHECK-NEXT: G_STORE [[UV10]](s32), [[COPY11]](p5) :: (store (s32) into %stack.0 + 40, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(p5) = COPY [[PTR_ADD13]](p5)
; CHECK-NEXT: G_STORE [[UV11]](s32), [[COPY12]](p5) :: (store (s32) into %stack.0 + 44, basealign 256, addrspace 5)
; CHECK-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(p5) = COPY [[PTR_ADD14]](p5)
; CHECK-NEXT: G_STORE [[UV12]](s32), [[COPY13]](p5) :: (store (s32) into %stack.0 + 48, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
+ ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(p5) = COPY [[PTR_ADD15]](p5)
; CHECK-NEXT: G_STORE [[UV13]](s32), [[COPY14]](p5) :: (store (s32) into %stack.0 + 52, basealign 256, addrspace 5)
; CHECK-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
+ ; CHECK-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(p5) = COPY [[PTR_ADD16]](p5)
; CHECK-NEXT: G_STORE [[UV14]](s32), [[COPY15]](p5) :: (store (s32) into %stack.0 + 56, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
+ ; CHECK-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(p5) = COPY [[PTR_ADD17]](p5)
; CHECK-NEXT: G_STORE [[UV15]](s32), [[COPY16]](p5) :: (store (s32) into %stack.0 + 60, basealign 256, addrspace 5)
; CHECK-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
- ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
+ ; CHECK-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(p5) = COPY [[PTR_ADD18]](p5)
; CHECK-NEXT: G_STORE [[UV16]](s32), [[COPY17]](p5) :: (store (s32) into %stack.0 + 64, align 64, basealign 256, addrspace 5)
; CHECK-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
- ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
+ ; CHECK-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(p5) = COPY [[PTR_ADD19]](p5)
; CHECK-NEXT: G_STORE [[UV17]](s32), [[COPY18]](p5) :: (store (s32) into %stack.0 + 68, basealign 256, addrspace 5)
; CHECK-NEXT: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
- ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
+ ; CHECK-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(p5) = COPY [[PTR_ADD20]](p5)
; CHECK-NEXT: G_STORE [[UV18]](s32), [[COPY19]](p5) :: (store (s32) into %stack.0 + 72, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
- ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
+ ; CHECK-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:_(p5) = COPY [[PTR_ADD21]](p5)
; CHECK-NEXT: G_STORE [[UV19]](s32), [[COPY20]](p5) :: (store (s32) into %stack.0 + 76, basealign 256, addrspace 5)
; CHECK-NEXT: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
- ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
+ ; CHECK-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK-NEXT: [[COPY21:%[0-9]+]]:_(p5) = COPY [[PTR_ADD22]](p5)
; CHECK-NEXT: G_STORE [[UV20]](s32), [[COPY21]](p5) :: (store (s32) into %stack.0 + 80, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
- ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
+ ; CHECK-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK-NEXT: [[COPY22:%[0-9]+]]:_(p5) = COPY [[PTR_ADD23]](p5)
; CHECK-NEXT: G_STORE [[UV21]](s32), [[COPY22]](p5) :: (store (s32) into %stack.0 + 84, basealign 256, addrspace 5)
; CHECK-NEXT: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
- ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
+ ; CHECK-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK-NEXT: [[COPY23:%[0-9]+]]:_(p5) = COPY [[PTR_ADD24]](p5)
; CHECK-NEXT: G_STORE [[UV22]](s32), [[COPY23]](p5) :: (store (s32) into %stack.0 + 88, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
- ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
+ ; CHECK-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK-NEXT: [[COPY24:%[0-9]+]]:_(p5) = COPY [[PTR_ADD25]](p5)
; CHECK-NEXT: G_STORE [[UV23]](s32), [[COPY24]](p5) :: (store (s32) into %stack.0 + 92, basealign 256, addrspace 5)
; CHECK-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
- ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
+ ; CHECK-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK-NEXT: [[COPY25:%[0-9]+]]:_(p5) = COPY [[PTR_ADD26]](p5)
; CHECK-NEXT: G_STORE [[UV24]](s32), [[COPY25]](p5) :: (store (s32) into %stack.0 + 96, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
- ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
+ ; CHECK-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK-NEXT: [[COPY26:%[0-9]+]]:_(p5) = COPY [[PTR_ADD27]](p5)
; CHECK-NEXT: G_STORE [[UV25]](s32), [[COPY26]](p5) :: (store (s32) into %stack.0 + 100, basealign 256, addrspace 5)
; CHECK-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
- ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
+ ; CHECK-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK-NEXT: [[COPY27:%[0-9]+]]:_(p5) = COPY [[PTR_ADD28]](p5)
; CHECK-NEXT: G_STORE [[UV26]](s32), [[COPY27]](p5) :: (store (s32) into %stack.0 + 104, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
- ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
+ ; CHECK-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK-NEXT: [[COPY28:%[0-9]+]]:_(p5) = COPY [[PTR_ADD29]](p5)
; CHECK-NEXT: G_STORE [[UV27]](s32), [[COPY28]](p5) :: (store (s32) into %stack.0 + 108, basealign 256, addrspace 5)
; CHECK-NEXT: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
- ; CHECK-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
+ ; CHECK-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK-NEXT: [[COPY29:%[0-9]+]]:_(p5) = COPY [[PTR_ADD30]](p5)
; CHECK-NEXT: G_STORE [[UV28]](s32), [[COPY29]](p5) :: (store (s32) into %stack.0 + 112, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
- ; CHECK-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
+ ; CHECK-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK-NEXT: [[COPY30:%[0-9]+]]:_(p5) = COPY [[PTR_ADD31]](p5)
; CHECK-NEXT: G_STORE [[UV29]](s32), [[COPY30]](p5) :: (store (s32) into %stack.0 + 116, basealign 256, addrspace 5)
; CHECK-NEXT: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
- ; CHECK-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
+ ; CHECK-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK-NEXT: [[COPY31:%[0-9]+]]:_(p5) = COPY [[PTR_ADD32]](p5)
; CHECK-NEXT: G_STORE [[UV30]](s32), [[COPY31]](p5) :: (store (s32) into %stack.0 + 120, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
- ; CHECK-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
+ ; CHECK-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK-NEXT: [[COPY32:%[0-9]+]]:_(p5) = COPY [[PTR_ADD33]](p5)
; CHECK-NEXT: G_STORE [[UV31]](s32), [[COPY32]](p5) :: (store (s32) into %stack.0 + 124, basealign 256, addrspace 5)
; CHECK-NEXT: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
- ; CHECK-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
+ ; CHECK-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK-NEXT: [[COPY33:%[0-9]+]]:_(p5) = COPY [[PTR_ADD34]](p5)
; CHECK-NEXT: G_STORE [[UV32]](s32), [[COPY33]](p5) :: (store (s32) into %stack.0 + 128, align 128, basealign 256, addrspace 5)
; CHECK-NEXT: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
- ; CHECK-NEXT: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
+ ; CHECK-NEXT: [[PTR_ADD35:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK-NEXT: [[COPY34:%[0-9]+]]:_(p5) = COPY [[PTR_ADD35]](p5)
; CHECK-NEXT: G_STORE [[UV33]](s32), [[COPY34]](p5) :: (store (s32) into %stack.0 + 132, basealign 256, addrspace 5)
; CHECK-NEXT: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
- ; CHECK-NEXT: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
+ ; CHECK-NEXT: [[PTR_ADD36:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK-NEXT: [[COPY35:%[0-9]+]]:_(p5) = COPY [[PTR_ADD36]](p5)
; CHECK-NEXT: G_STORE [[UV34]](s32), [[COPY35]](p5) :: (store (s32) into %stack.0 + 136, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
- ; CHECK-NEXT: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
+ ; CHECK-NEXT: [[PTR_ADD37:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK-NEXT: [[COPY36:%[0-9]+]]:_(p5) = COPY [[PTR_ADD37]](p5)
; CHECK-NEXT: G_STORE [[UV35]](s32), [[COPY36]](p5) :: (store (s32) into %stack.0 + 140, basealign 256, addrspace 5)
; CHECK-NEXT: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
- ; CHECK-NEXT: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
+ ; CHECK-NEXT: [[PTR_ADD38:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK-NEXT: [[COPY37:%[0-9]+]]:_(p5) = COPY [[PTR_ADD38]](p5)
; CHECK-NEXT: G_STORE [[UV36]](s32), [[COPY37]](p5) :: (store (s32) into %stack.0 + 144, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
- ; CHECK-NEXT: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
+ ; CHECK-NEXT: [[PTR_ADD39:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK-NEXT: [[COPY38:%[0-9]+]]:_(p5) = COPY [[PTR_ADD39]](p5)
; CHECK-NEXT: G_STORE [[UV37]](s32), [[COPY38]](p5) :: (store (s32) into %stack.0 + 148, basealign 256, addrspace 5)
; CHECK-NEXT: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
- ; CHECK-NEXT: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
+ ; CHECK-NEXT: [[PTR_ADD40:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK-NEXT: [[COPY39:%[0-9]+]]:_(p5) = COPY [[PTR_ADD40]](p5)
; CHECK-NEXT: G_STORE [[UV38]](s32), [[COPY39]](p5) :: (store (s32) into %stack.0 + 152, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
- ; CHECK-NEXT: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
+ ; CHECK-NEXT: [[PTR_ADD41:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK-NEXT: [[COPY40:%[0-9]+]]:_(p5) = COPY [[PTR_ADD41]](p5)
; CHECK-NEXT: G_STORE [[UV39]](s32), [[COPY40]](p5) :: (store (s32) into %stack.0 + 156, basealign 256, addrspace 5)
; CHECK-NEXT: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
- ; CHECK-NEXT: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
+ ; CHECK-NEXT: [[PTR_ADD42:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK-NEXT: [[COPY41:%[0-9]+]]:_(p5) = COPY [[PTR_ADD42]](p5)
; CHECK-NEXT: G_STORE [[UV40]](s32), [[COPY41]](p5) :: (store (s32) into %stack.0 + 160, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
- ; CHECK-NEXT: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
+ ; CHECK-NEXT: [[PTR_ADD43:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK-NEXT: [[COPY42:%[0-9]+]]:_(p5) = COPY [[PTR_ADD43]](p5)
; CHECK-NEXT: G_STORE [[UV41]](s32), [[COPY42]](p5) :: (store (s32) into %stack.0 + 164, basealign 256, addrspace 5)
; CHECK-NEXT: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
- ; CHECK-NEXT: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
+ ; CHECK-NEXT: [[PTR_ADD44:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK-NEXT: [[COPY43:%[0-9]+]]:_(p5) = COPY [[PTR_ADD44]](p5)
; CHECK-NEXT: G_STORE [[UV42]](s32), [[COPY43]](p5) :: (store (s32) into %stack.0 + 168, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
- ; CHECK-NEXT: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
+ ; CHECK-NEXT: [[PTR_ADD45:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK-NEXT: [[COPY44:%[0-9]+]]:_(p5) = COPY [[PTR_ADD45]](p5)
; CHECK-NEXT: G_STORE [[UV43]](s32), [[COPY44]](p5) :: (store (s32) into %stack.0 + 172, basealign 256, addrspace 5)
; CHECK-NEXT: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
- ; CHECK-NEXT: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
+ ; CHECK-NEXT: [[PTR_ADD46:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK-NEXT: [[COPY45:%[0-9]+]]:_(p5) = COPY [[PTR_ADD46]](p5)
; CHECK-NEXT: G_STORE [[UV44]](s32), [[COPY45]](p5) :: (store (s32) into %stack.0 + 176, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
- ; CHECK-NEXT: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
+ ; CHECK-NEXT: [[PTR_ADD47:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK-NEXT: [[COPY46:%[0-9]+]]:_(p5) = COPY [[PTR_ADD47]](p5)
; CHECK-NEXT: G_STORE [[UV45]](s32), [[COPY46]](p5) :: (store (s32) into %stack.0 + 180, basealign 256, addrspace 5)
; CHECK-NEXT: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
- ; CHECK-NEXT: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
+ ; CHECK-NEXT: [[PTR_ADD48:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK-NEXT: [[COPY47:%[0-9]+]]:_(p5) = COPY [[PTR_ADD48]](p5)
; CHECK-NEXT: G_STORE [[UV46]](s32), [[COPY47]](p5) :: (store (s32) into %stack.0 + 184, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
- ; CHECK-NEXT: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
+ ; CHECK-NEXT: [[PTR_ADD49:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK-NEXT: [[COPY48:%[0-9]+]]:_(p5) = COPY [[PTR_ADD49]](p5)
; CHECK-NEXT: G_STORE [[UV47]](s32), [[COPY48]](p5) :: (store (s32) into %stack.0 + 188, basealign 256, addrspace 5)
; CHECK-NEXT: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
- ; CHECK-NEXT: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
+ ; CHECK-NEXT: [[PTR_ADD50:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK-NEXT: [[COPY49:%[0-9]+]]:_(p5) = COPY [[PTR_ADD50]](p5)
; CHECK-NEXT: G_STORE [[UV48]](s32), [[COPY49]](p5) :: (store (s32) into %stack.0 + 192, align 64, basealign 256, addrspace 5)
; CHECK-NEXT: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
- ; CHECK-NEXT: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
+ ; CHECK-NEXT: [[PTR_ADD51:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK-NEXT: [[COPY50:%[0-9]+]]:_(p5) = COPY [[PTR_ADD51]](p5)
; CHECK-NEXT: G_STORE [[UV49]](s32), [[COPY50]](p5) :: (store (s32) into %stack.0 + 196, basealign 256, addrspace 5)
; CHECK-NEXT: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
- ; CHECK-NEXT: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
+ ; CHECK-NEXT: [[PTR_ADD52:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK-NEXT: [[COPY51:%[0-9]+]]:_(p5) = COPY [[PTR_ADD52]](p5)
; CHECK-NEXT: G_STORE [[UV50]](s32), [[COPY51]](p5) :: (store (s32) into %stack.0 + 200, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
- ; CHECK-NEXT: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
+ ; CHECK-NEXT: [[PTR_ADD53:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK-NEXT: [[COPY52:%[0-9]+]]:_(p5) = COPY [[PTR_ADD53]](p5)
; CHECK-NEXT: G_STORE [[UV51]](s32), [[COPY52]](p5) :: (store (s32) into %stack.0 + 204, basealign 256, addrspace 5)
; CHECK-NEXT: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
- ; CHECK-NEXT: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
+ ; CHECK-NEXT: [[PTR_ADD54:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK-NEXT: [[COPY53:%[0-9]+]]:_(p5) = COPY [[PTR_ADD54]](p5)
; CHECK-NEXT: G_STORE [[UV52]](s32), [[COPY53]](p5) :: (store (s32) into %stack.0 + 208, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
- ; CHECK-NEXT: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
+ ; CHECK-NEXT: [[PTR_ADD55:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK-NEXT: [[COPY54:%[0-9]+]]:_(p5) = COPY [[PTR_ADD55]](p5)
; CHECK-NEXT: G_STORE [[UV53]](s32), [[COPY54]](p5) :: (store (s32) into %stack.0 + 212, basealign 256, addrspace 5)
; CHECK-NEXT: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
- ; CHECK-NEXT: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
+ ; CHECK-NEXT: [[PTR_ADD56:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK-NEXT: [[COPY55:%[0-9]+]]:_(p5) = COPY [[PTR_ADD56]](p5)
; CHECK-NEXT: G_STORE [[UV54]](s32), [[COPY55]](p5) :: (store (s32) into %stack.0 + 216, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
- ; CHECK-NEXT: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
+ ; CHECK-NEXT: [[PTR_ADD57:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK-NEXT: [[COPY56:%[0-9]+]]:_(p5) = COPY [[PTR_ADD57]](p5)
; CHECK-NEXT: G_STORE [[UV55]](s32), [[COPY56]](p5) :: (store (s32) into %stack.0 + 220, basealign 256, addrspace 5)
; CHECK-NEXT: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
- ; CHECK-NEXT: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
+ ; CHECK-NEXT: [[PTR_ADD58:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK-NEXT: [[COPY57:%[0-9]+]]:_(p5) = COPY [[PTR_ADD58]](p5)
; CHECK-NEXT: G_STORE [[UV56]](s32), [[COPY57]](p5) :: (store (s32) into %stack.0 + 224, align 32, basealign 256, addrspace 5)
; CHECK-NEXT: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
- ; CHECK-NEXT: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
+ ; CHECK-NEXT: [[PTR_ADD59:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK-NEXT: [[COPY58:%[0-9]+]]:_(p5) = COPY [[PTR_ADD59]](p5)
; CHECK-NEXT: G_STORE [[UV57]](s32), [[COPY58]](p5) :: (store (s32) into %stack.0 + 228, basealign 256, addrspace 5)
; CHECK-NEXT: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
- ; CHECK-NEXT: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
+ ; CHECK-NEXT: [[PTR_ADD60:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK-NEXT: [[COPY59:%[0-9]+]]:_(p5) = COPY [[PTR_ADD60]](p5)
; CHECK-NEXT: G_STORE [[UV58]](s32), [[COPY59]](p5) :: (store (s32) into %stack.0 + 232, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
- ; CHECK-NEXT: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
+ ; CHECK-NEXT: [[PTR_ADD61:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK-NEXT: [[COPY60:%[0-9]+]]:_(p5) = COPY [[PTR_ADD61]](p5)
; CHECK-NEXT: G_STORE [[UV59]](s32), [[COPY60]](p5) :: (store (s32) into %stack.0 + 236, basealign 256, addrspace 5)
; CHECK-NEXT: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
- ; CHECK-NEXT: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
+ ; CHECK-NEXT: [[PTR_ADD62:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK-NEXT: [[COPY61:%[0-9]+]]:_(p5) = COPY [[PTR_ADD62]](p5)
; CHECK-NEXT: G_STORE [[UV60]](s32), [[COPY61]](p5) :: (store (s32) into %stack.0 + 240, align 16, basealign 256, addrspace 5)
; CHECK-NEXT: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
- ; CHECK-NEXT: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
+ ; CHECK-NEXT: [[PTR_ADD63:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK-NEXT: [[COPY62:%[0-9]+]]:_(p5) = COPY [[PTR_ADD63]](p5)
; CHECK-NEXT: G_STORE [[UV61]](s32), [[COPY62]](p5) :: (store (s32) into %stack.0 + 244, basealign 256, addrspace 5)
; CHECK-NEXT: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
- ; CHECK-NEXT: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
+ ; CHECK-NEXT: [[PTR_ADD64:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK-NEXT: [[COPY63:%[0-9]+]]:_(p5) = COPY [[PTR_ADD64]](p5)
; CHECK-NEXT: G_STORE [[UV62]](s32), [[COPY63]](p5) :: (store (s32) into %stack.0 + 248, align 8, basealign 256, addrspace 5)
; CHECK-NEXT: [[C66:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
- ; CHECK-NEXT: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C66]](s32)
+ ; CHECK-NEXT: [[PTR_ADD65:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C66]](s32)
; CHECK-NEXT: [[COPY64:%[0-9]+]]:_(p5) = COPY [[PTR_ADD65]](p5)
; CHECK-NEXT: G_STORE [[UV63]](s32), [[COPY64]](p5) :: (store (s32) into %stack.0 + 252, basealign 256, addrspace 5)
; CHECK-NEXT: [[C67:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
@@ -737,46 +737,46 @@ body: |
; CHECK-NEXT: [[COPY65:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY65]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CHECK-NEXT: [[C68:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CHECK-NEXT: [[PTR_ADD67:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C68]](s64)
+ ; CHECK-NEXT: [[PTR_ADD67:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C68]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD67]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; CHECK-NEXT: [[C69:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CHECK-NEXT: [[PTR_ADD68:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C69]](s64)
+ ; CHECK-NEXT: [[PTR_ADD68:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C69]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[PTR_ADD68]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
; CHECK-NEXT: [[C70:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CHECK-NEXT: [[PTR_ADD69:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C70]](s64)
+ ; CHECK-NEXT: [[PTR_ADD69:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C70]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR3]](<4 x s32>), [[PTR_ADD69]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD70:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD70:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR4]](<4 x s32>), [[PTR_ADD70]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
; CHECK-NEXT: [[C71:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
- ; CHECK-NEXT: [[PTR_ADD71:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C71]](s64)
+ ; CHECK-NEXT: [[PTR_ADD71:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C71]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR5]](<4 x s32>), [[PTR_ADD71]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
; CHECK-NEXT: [[C72:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK-NEXT: [[PTR_ADD72:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C72]](s64)
+ ; CHECK-NEXT: [[PTR_ADD72:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C72]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR6]](<4 x s32>), [[PTR_ADD72]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
; CHECK-NEXT: [[C73:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
- ; CHECK-NEXT: [[PTR_ADD73:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C73]](s64)
+ ; CHECK-NEXT: [[PTR_ADD73:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C73]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR7]](<4 x s32>), [[PTR_ADD73]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD74:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD74:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C1]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR8]](<4 x s32>), [[PTR_ADD74]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
; CHECK-NEXT: [[C74:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
- ; CHECK-NEXT: [[PTR_ADD75:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C74]](s64)
+ ; CHECK-NEXT: [[PTR_ADD75:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C74]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR9]](<4 x s32>), [[PTR_ADD75]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
; CHECK-NEXT: [[C75:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
- ; CHECK-NEXT: [[PTR_ADD76:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C75]](s64)
+ ; CHECK-NEXT: [[PTR_ADD76:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C75]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR10]](<4 x s32>), [[PTR_ADD76]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
; CHECK-NEXT: [[C76:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
- ; CHECK-NEXT: [[PTR_ADD77:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C76]](s64)
+ ; CHECK-NEXT: [[PTR_ADD77:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C76]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR11]](<4 x s32>), [[PTR_ADD77]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
- ; CHECK-NEXT: [[PTR_ADD78:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD78:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR12]](<4 x s32>), [[PTR_ADD78]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
; CHECK-NEXT: [[C77:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
- ; CHECK-NEXT: [[PTR_ADD79:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C77]](s64)
+ ; CHECK-NEXT: [[PTR_ADD79:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C77]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR13]](<4 x s32>), [[PTR_ADD79]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
; CHECK-NEXT: [[C78:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
- ; CHECK-NEXT: [[PTR_ADD80:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C78]](s64)
+ ; CHECK-NEXT: [[PTR_ADD80:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C78]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR14]](<4 x s32>), [[PTR_ADD80]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
; CHECK-NEXT: [[C79:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
- ; CHECK-NEXT: [[PTR_ADD81:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C79]](s64)
+ ; CHECK-NEXT: [[PTR_ADD81:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY65]], [[C79]](s64)
; CHECK-NEXT: G_STORE [[BUILD_VECTOR15]](<4 x s32>), [[PTR_ADD81]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s32) = COPY $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir
index cd23abe..b91f1f4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir
@@ -16,15 +16,15 @@ body: |
; CI-NEXT: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[PTRTOINT]](s32), [[C]](s32)
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[MV]](p4) :: (load (s8), addrspace 6)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[MV]], [[C1]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 6)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C2]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[MV]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 6)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 6)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C2]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir
index a5037ba..9c28eb0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir
@@ -221,7 +221,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -234,7 +234,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -247,7 +247,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -302,7 +302,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -315,7 +315,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -328,7 +328,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -351,15 +351,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -374,15 +374,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -397,15 +397,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -494,7 +494,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 2, align 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -507,7 +507,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 2, align 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -520,7 +520,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 2, align 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -544,13 +544,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -563,13 +563,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -582,13 +582,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -712,16 +712,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -737,16 +737,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -762,16 +762,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -797,15 +797,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -814,15 +814,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -840,15 +840,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -857,15 +857,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -883,15 +883,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -900,15 +900,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1038,22 +1038,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1067,22 +1067,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1096,22 +1096,22 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1135,15 +1135,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1151,30 +1151,30 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -1190,15 +1190,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1206,30 +1206,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -1245,15 +1245,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1261,30 +1261,30 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -1310,7 +1310,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -1323,7 +1323,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -1336,7 +1336,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -1359,7 +1359,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -1375,7 +1375,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; VI-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -1391,7 +1391,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -1488,15 +1488,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1504,45 +1504,45 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -1558,15 +1558,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1574,45 +1574,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -1628,15 +1628,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1644,45 +1644,45 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -1804,15 +1804,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1821,15 +1821,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1848,15 +1848,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1865,15 +1865,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1892,15 +1892,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1909,15 +1909,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2039,16 +2039,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2065,16 +2065,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2091,16 +2091,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2127,15 +2127,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2144,15 +2144,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2171,15 +2171,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2188,15 +2188,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2215,15 +2215,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2232,15 +2232,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2300,7 +2300,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2314,7 +2314,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2328,7 +2328,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2352,15 +2352,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2376,15 +2376,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2400,15 +2400,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2500,7 +2500,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2513,7 +2513,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2526,7 +2526,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2655,13 +2655,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -2697,13 +2697,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -2737,13 +2737,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -2821,7 +2821,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2834,7 +2834,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2847,7 +2847,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -2871,15 +2871,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2894,15 +2894,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2917,15 +2917,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3359,7 +3359,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -3376,7 +3376,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -3394,7 +3394,7 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -3416,15 +3416,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3443,15 +3443,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3470,16 +3470,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3589,10 +3589,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), align 4, addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, align 4, addrspace 4)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -3624,10 +3624,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), align 4, addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, align 4, addrspace 4)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -3660,11 +3660,11 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), align 4, addrspace 4)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, align 4, addrspace 4)
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -3700,10 +3700,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -3735,10 +3735,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -3771,11 +3771,11 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -3811,22 +3811,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3860,22 +3860,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3909,24 +3909,24 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -4026,13 +4026,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -4055,13 +4055,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -4085,15 +4085,15 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -4117,29 +4117,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4164,29 +4164,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4211,32 +4211,32 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4358,15 +4358,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4379,15 +4379,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4400,15 +4400,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4431,15 +4431,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4447,15 +4447,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4470,15 +4470,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4486,15 +4486,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4509,15 +4509,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4525,15 +4525,15 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4901,16 +4901,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4919,16 +4919,16 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; CI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -4945,16 +4945,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4963,16 +4963,16 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -4989,16 +4989,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5007,16 +5007,16 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -5043,15 +5043,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5060,15 +5060,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5079,30 +5079,30 @@ body: |
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5121,15 +5121,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5138,15 +5138,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5157,30 +5157,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5199,15 +5199,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5216,15 +5216,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5235,30 +5235,30 @@ body: |
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5332,7 +5332,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
; CI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -5346,7 +5346,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
; VI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -5360,7 +5360,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -5386,15 +5386,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5403,15 +5403,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5422,30 +5422,30 @@ body: |
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5456,30 +5456,30 @@ body: |
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; CI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; CI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; CI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; CI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -5500,15 +5500,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5517,15 +5517,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5536,30 +5536,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5570,30 +5570,30 @@ body: |
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; VI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; VI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; VI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -5614,15 +5614,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5631,15 +5631,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5650,30 +5650,30 @@ body: |
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5684,30 +5684,30 @@ body: |
; GFX9-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; GFX9-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; GFX9-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; GFX9-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; GFX9-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; GFX9-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; GFX9-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; GFX9-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; GFX9-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; GFX9-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; GFX9-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -5802,15 +5802,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5819,15 +5819,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5838,30 +5838,30 @@ body: |
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -5872,30 +5872,30 @@ body: |
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; CI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; CI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; CI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; CI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -5906,30 +5906,30 @@ body: |
; CI-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; CI-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; CI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p4) :: (load (s8) from unknown-address + 24, addrspace 4)
- ; CI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p4) :: (load (s8) from unknown-address + 25, addrspace 4)
; CI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; CI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; CI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p4) :: (load (s8) from unknown-address + 26, addrspace 4)
- ; CI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load (s8) from unknown-address + 27, addrspace 4)
; CI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; CI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; CI-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; CI-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; CI-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; CI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p4) :: (load (s8) from unknown-address + 28, addrspace 4)
- ; CI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p4) :: (load (s8) from unknown-address + 29, addrspace 4)
; CI-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; CI-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; CI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p4) :: (load (s8) from unknown-address + 30, addrspace 4)
- ; CI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load (s8) from unknown-address + 31, addrspace 4)
; CI-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; CI-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -5948,15 +5948,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5965,15 +5965,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5984,30 +5984,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -6018,30 +6018,30 @@ body: |
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; VI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; VI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; VI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -6052,30 +6052,30 @@ body: |
; VI-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; VI-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; VI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; VI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p4) :: (load (s8) from unknown-address + 24, addrspace 4)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p4) :: (load (s8) from unknown-address + 25, addrspace 4)
; VI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; VI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p4) :: (load (s8) from unknown-address + 26, addrspace 4)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load (s8) from unknown-address + 27, addrspace 4)
; VI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; VI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; VI-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; VI-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; VI-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p4) :: (load (s8) from unknown-address + 28, addrspace 4)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p4) :: (load (s8) from unknown-address + 29, addrspace 4)
; VI-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; VI-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p4) :: (load (s8) from unknown-address + 30, addrspace 4)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load (s8) from unknown-address + 31, addrspace 4)
; VI-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; VI-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -6094,15 +6094,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6111,15 +6111,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6130,30 +6130,30 @@ body: |
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -6164,30 +6164,30 @@ body: |
; GFX9-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; GFX9-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; GFX9-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; GFX9-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 4)
; GFX9-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 4)
; GFX9-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; GFX9-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; GFX9-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; GFX9-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 4)
; GFX9-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 4)
; GFX9-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -6198,30 +6198,30 @@ body: |
; GFX9-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; GFX9-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; GFX9-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; GFX9-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; GFX9-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; GFX9-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p4) :: (load (s8) from unknown-address + 24, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p4) :: (load (s8) from unknown-address + 25, addrspace 4)
; GFX9-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; GFX9-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; GFX9-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p4) :: (load (s8) from unknown-address + 26, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load (s8) from unknown-address + 27, addrspace 4)
; GFX9-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; GFX9-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; GFX9-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; GFX9-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; GFX9-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; GFX9-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p4) :: (load (s8) from unknown-address + 28, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p4) :: (load (s8) from unknown-address + 29, addrspace 4)
; GFX9-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; GFX9-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; GFX9-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p4) :: (load (s8) from unknown-address + 30, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load (s8) from unknown-address + 31, addrspace 4)
; GFX9-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -6386,15 +6386,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6402,45 +6402,45 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -6456,15 +6456,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6472,45 +6472,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -6526,15 +6526,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6542,45 +6542,45 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 4)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 4)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 4)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 4)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -6674,15 +6674,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6690,15 +6690,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6714,15 +6714,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6730,15 +6730,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6754,15 +6754,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 4)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 4)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 4)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6770,15 +6770,15 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 4)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 4)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 4)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -7080,15 +7080,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7096,15 +7096,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -7119,15 +7119,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7135,15 +7135,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -7158,15 +7158,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7174,15 +7174,15 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -7207,15 +7207,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -7228,15 +7228,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -7249,15 +7249,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -7373,15 +7373,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7389,30 +7389,30 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -7421,43 +7421,43 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 1)
; CI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 1)
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 1)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 1)
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -7476,15 +7476,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7492,30 +7492,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -7524,43 +7524,43 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 1)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 1)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 1)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 1)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -7579,15 +7579,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s8), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p4) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7595,30 +7595,30 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p4) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p4) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -7627,43 +7627,43 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p4) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p4) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p4) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p4) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p4) :: (load (s8) from unknown-address + 17, addrspace 1)
; GFX9-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p4) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load (s8) from unknown-address + 19, addrspace 1)
; GFX9-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; GFX9-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; GFX9-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; GFX9-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p4) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p4) :: (load (s8) from unknown-address + 21, addrspace 1)
; GFX9-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p4) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load (s8) from unknown-address + 23, addrspace 1)
; GFX9-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -7695,43 +7695,43 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 1)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load (s16) from unknown-address + 18, addrspace 1)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s16) from unknown-address + 22, addrspace 1)
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -7748,43 +7748,43 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load (s16) from unknown-address + 18, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s16) from unknown-address + 22, addrspace 1)
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -7801,43 +7801,43 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p4) :: (load (s16), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p4) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p4) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p4) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load (s16) from unknown-address + 14, addrspace 1)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p4) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load (s16) from unknown-address + 18, addrspace 1)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p4) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load (s16) from unknown-address + 22, addrspace 1)
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -7868,7 +7868,7 @@ body: |
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 4, addrspace 1)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -7883,7 +7883,7 @@ body: |
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 4, addrspace 1)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -7898,7 +7898,7 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -7926,7 +7926,7 @@ body: |
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 16, addrspace 1)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -7941,7 +7941,7 @@ body: |
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 16, addrspace 1)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -7956,7 +7956,7 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p4) :: (load (<3 x s32>), align 16, addrspace 1)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
index e0a225c..16ce48b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
@@ -459,7 +459,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -472,7 +472,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -506,7 +506,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -519,7 +519,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -532,7 +532,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -622,7 +622,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -635,7 +635,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -669,7 +669,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -682,7 +682,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -695,7 +695,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -718,15 +718,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -741,15 +741,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -785,15 +785,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -808,15 +808,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -831,15 +831,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -864,7 +864,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -887,7 +887,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -975,7 +975,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -986,7 +986,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1049,7 +1049,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1060,7 +1060,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1123,15 +1123,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1144,15 +1144,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1186,16 +1186,16 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1211,16 +1211,16 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1236,16 +1236,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1271,15 +1271,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1287,15 +1287,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1310,15 +1310,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1326,15 +1326,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1370,15 +1370,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1387,15 +1387,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1413,15 +1413,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1430,15 +1430,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1456,15 +1456,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1473,15 +1473,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1509,10 +1509,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1524,10 +1524,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1597,10 +1597,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1612,10 +1612,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1685,10 +1685,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1700,10 +1700,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -1773,22 +1773,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1802,22 +1802,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1855,22 +1855,22 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1884,22 +1884,22 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1913,22 +1913,22 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1952,15 +1952,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1968,30 +1968,30 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2007,15 +2007,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2023,30 +2023,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2086,15 +2086,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2102,30 +2102,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX9PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2141,15 +2141,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2157,30 +2157,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX11PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2196,15 +2196,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2212,30 +2212,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2261,16 +2261,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s160) = G_BITCAST [[BUILD_VECTOR]](<5 x s32>)
@@ -2282,16 +2282,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s160) = G_BITCAST [[BUILD_VECTOR]](<5 x s32>)
@@ -2303,7 +2303,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2316,7 +2316,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX11PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2329,7 +2329,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2342,7 +2342,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2355,7 +2355,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2368,7 +2368,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2391,22 +2391,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<7 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s224) = G_BITCAST [[BUILD_VECTOR]](<7 x s32>)
@@ -2420,22 +2420,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<7 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s224) = G_BITCAST [[BUILD_VECTOR]](<7 x s32>)
@@ -2449,7 +2449,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9PLUS-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2465,7 +2465,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX11PLUS-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2481,7 +2481,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX12-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2497,7 +2497,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; UNALIGNED_GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2513,7 +2513,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; UNALIGNED_GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2529,7 +2529,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
; UNALIGNED_GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2558,13 +2558,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -2576,13 +2576,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -2652,13 +2652,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -2670,13 +2670,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -2746,15 +2746,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2762,45 +2762,45 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2816,15 +2816,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2832,45 +2832,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2910,15 +2910,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2926,45 +2926,45 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX9PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2980,15 +2980,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2996,45 +2996,45 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX11PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3050,15 +3050,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3066,45 +3066,45 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3130,25 +3130,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -3160,25 +3160,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -3190,7 +3190,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3202,7 +3202,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3214,7 +3214,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3226,7 +3226,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3238,7 +3238,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3250,7 +3250,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -3272,7 +3272,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -3283,7 +3283,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -3346,7 +3346,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -3357,7 +3357,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -3420,15 +3420,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3436,15 +3436,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3459,15 +3459,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3475,15 +3475,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3519,15 +3519,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3536,15 +3536,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3563,15 +3563,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3580,15 +3580,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3607,15 +3607,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3624,15 +3624,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3727,7 +3727,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p4)
@@ -3738,7 +3738,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p4)
@@ -3801,7 +3801,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p4)
@@ -3812,7 +3812,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p4)
@@ -3875,15 +3875,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3896,15 +3896,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3938,16 +3938,16 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3964,16 +3964,16 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3990,16 +3990,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -4026,15 +4026,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4042,15 +4042,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4065,15 +4065,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4081,15 +4081,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4125,15 +4125,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4142,15 +4142,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4169,15 +4169,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4186,15 +4186,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4213,15 +4213,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4230,15 +4230,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -4333,7 +4333,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4347,7 +4347,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4382,7 +4382,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4396,7 +4396,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4410,7 +4410,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4434,15 +4434,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4458,15 +4458,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4503,15 +4503,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4527,15 +4527,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4551,15 +4551,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4721,7 +4721,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4734,7 +4734,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4768,7 +4768,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4781,7 +4781,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4794,7 +4794,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5073,13 +5073,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -5115,13 +5115,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -5155,7 +5155,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 2)
; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5190,7 +5190,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 2)
; GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5225,7 +5225,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 2)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5260,13 +5260,13 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -5300,13 +5300,13 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -5340,13 +5340,13 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -5459,7 +5459,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5472,7 +5472,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5506,7 +5506,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5519,7 +5519,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5532,7 +5532,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -5556,15 +5556,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5579,15 +5579,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5623,15 +5623,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5646,15 +5646,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5669,15 +5669,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5703,7 +5703,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -5714,7 +5714,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -5778,13 +5778,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -5795,13 +5795,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -5865,25 +5865,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -5894,25 +5894,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -5923,7 +5923,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -5934,7 +5934,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -5945,7 +5945,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -5956,7 +5956,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -5967,7 +5967,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -5978,7 +5978,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -6067,7 +6067,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -6084,7 +6084,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -6123,7 +6123,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6136,7 +6136,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6149,7 +6149,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6171,15 +6171,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6198,15 +6198,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6246,16 +6246,16 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6269,16 +6269,16 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6292,16 +6292,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6325,7 +6325,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -6358,7 +6358,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -6535,7 +6535,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -6568,7 +6568,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 4, align 4)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -6602,11 +6602,11 @@ body: |
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6631,11 +6631,11 @@ body: |
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6660,11 +6660,11 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6689,11 +6689,11 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6718,11 +6718,11 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6747,11 +6747,11 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 4)
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 4)
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6787,10 +6787,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -6822,10 +6822,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -6858,11 +6858,11 @@ body: |
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6887,11 +6887,11 @@ body: |
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6916,11 +6916,11 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6945,11 +6945,11 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6974,11 +6974,11 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7003,11 +7003,11 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7043,22 +7043,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7092,22 +7092,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7142,11 +7142,11 @@ body: |
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2, align 1)
; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 1)
; GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7171,11 +7171,11 @@ body: |
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2, align 1)
; GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 1)
; GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7200,11 +7200,11 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2, align 1)
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4, align 1)
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7228,24 +7228,24 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7271,24 +7271,24 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7314,24 +7314,24 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7369,7 +7369,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s16>) from unknown-address + 4)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -7380,7 +7380,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s16>) from unknown-address + 4)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -7443,7 +7443,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s16>) from unknown-address + 4)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -7454,7 +7454,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<2 x s16>))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s16>) from unknown-address + 4)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -7517,7 +7517,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -7527,9 +7527,9 @@ body: |
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LOAD2]], [[C1]]
; CI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LOAD3]], [[C1]]
@@ -7545,7 +7545,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -7555,9 +7555,9 @@ body: |
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; VI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LOAD2]], [[C1]]
; VI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LOAD3]], [[C1]]
@@ -7595,15 +7595,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7618,15 +7618,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7641,15 +7641,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7673,15 +7673,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -7693,15 +7693,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL2]]
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -7719,15 +7719,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -7739,15 +7739,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL2]]
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -7786,32 +7786,32 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -7827,32 +7827,32 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -7868,32 +7868,32 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -7919,13 +7919,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s16>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -7937,13 +7937,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s16>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -8013,7 +8013,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8024,7 +8024,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8087,7 +8087,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8098,7 +8098,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8162,7 +8162,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8173,7 +8173,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8236,10 +8236,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -8250,10 +8250,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -8318,10 +8318,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -8332,10 +8332,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -8398,13 +8398,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8415,13 +8415,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8484,13 +8484,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8501,13 +8501,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8570,13 +8570,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8587,13 +8587,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8656,25 +8656,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -8685,25 +8685,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -8714,7 +8714,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8725,7 +8725,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8736,7 +8736,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8747,7 +8747,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8758,7 +8758,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8769,7 +8769,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -8790,49 +8790,49 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load (s32) from unknown-address + 32, align 32)
; CI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CI-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load (s32) from unknown-address + 36)
; CI-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64)
; CI-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load (s32) from unknown-address + 40, align 8)
; CI-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s64)
; CI-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s32) from unknown-address + 44)
; CI-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s64)
; CI-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load (s32) from unknown-address + 48, align 16)
; CI-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 52
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s64)
; CI-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load (s32) from unknown-address + 52)
; CI-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s64)
; CI-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load (s32) from unknown-address + 56, align 8)
; CI-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 60
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C14]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s64)
; CI-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s32) from unknown-address + 60)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -8843,49 +8843,49 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load (s32) from unknown-address + 32, align 32)
; VI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; VI-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load (s32) from unknown-address + 36)
; VI-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64)
; VI-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load (s32) from unknown-address + 40, align 8)
; VI-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s64)
; VI-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s32) from unknown-address + 44)
; VI-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s64)
; VI-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load (s32) from unknown-address + 48, align 16)
; VI-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 52
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s64)
; VI-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load (s32) from unknown-address + 52)
; VI-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s64)
; VI-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load (s32) from unknown-address + 56, align 8)
; VI-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 60
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C14]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s64)
; VI-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s32) from unknown-address + 60)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -8896,13 +8896,13 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -8913,13 +8913,13 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -8930,13 +8930,13 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -8947,13 +8947,13 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -8964,13 +8964,13 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -8981,13 +8981,13 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -9008,13 +9008,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9026,13 +9026,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9096,13 +9096,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9114,13 +9114,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9184,13 +9184,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9202,13 +9202,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -9272,29 +9272,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s16) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s16) from unknown-address + 14)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -9308,29 +9308,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s16) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s16) from unknown-address + 14)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -9365,16 +9365,16 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9383,16 +9383,16 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s16) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s16) from unknown-address + 14)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -9409,16 +9409,16 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9427,16 +9427,16 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s16) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s16) from unknown-address + 14)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -9453,16 +9453,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9471,16 +9471,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s16) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s16) from unknown-address + 10)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s16) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s16) from unknown-address + 14)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -9507,15 +9507,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9523,15 +9523,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -9539,29 +9539,29 @@ body: |
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -9577,15 +9577,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9593,15 +9593,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -9609,29 +9609,29 @@ body: |
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -9668,15 +9668,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9685,15 +9685,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -9704,30 +9704,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX9PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -9746,15 +9746,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9763,15 +9763,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -9782,30 +9782,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX11PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -9824,15 +9824,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9841,15 +9841,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -9860,30 +9860,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX12-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX12-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX12-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -9912,19 +9912,19 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -9938,19 +9938,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -9964,7 +9964,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -9978,7 +9978,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -9992,7 +9992,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10006,7 +10006,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; UNALIGNED_GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10020,7 +10020,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; UNALIGNED_GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10034,7 +10034,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
; UNALIGNED_GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10060,19 +10060,19 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 8)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10086,19 +10086,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 8)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10112,7 +10112,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10126,7 +10126,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10140,7 +10140,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10154,7 +10154,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10168,7 +10168,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10182,7 +10182,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10208,15 +10208,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10224,15 +10224,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10240,29 +10240,29 @@ body: |
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -10270,29 +10270,29 @@ body: |
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR8]](s32), [[OR11]](s32)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; CI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -10310,15 +10310,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10326,15 +10326,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10342,29 +10342,29 @@ body: |
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -10372,29 +10372,29 @@ body: |
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR8]](s32), [[OR11]](s32)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -10412,7 +10412,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 1)
; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10426,7 +10426,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 1)
; GFX11PLUS-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11PLUS-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10440,7 +10440,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 1)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -10454,15 +10454,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10471,15 +10471,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10490,30 +10490,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX9PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -10524,30 +10524,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX9PLUS-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -10568,15 +10568,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10585,15 +10585,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10604,30 +10604,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX11PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -10638,30 +10638,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX11PLUS-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -10682,15 +10682,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10699,15 +10699,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10718,30 +10718,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX12-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX12-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX12-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -10752,30 +10752,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX12-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX12-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX12-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX12-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX12-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX12-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX12-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -10808,25 +10808,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -10838,25 +10838,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -10868,7 +10868,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10879,7 +10879,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10890,7 +10890,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10901,7 +10901,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10912,7 +10912,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10923,7 +10923,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -10944,25 +10944,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 8)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -10974,25 +10974,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 8)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -11004,7 +11004,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11015,7 +11015,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11026,7 +11026,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11037,7 +11037,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11048,7 +11048,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11059,7 +11059,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11080,15 +11080,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11096,15 +11096,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11112,29 +11112,29 @@ body: |
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -11142,29 +11142,29 @@ body: |
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR8]](s32), [[OR11]](s32)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; CI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -11172,29 +11172,29 @@ body: |
; CI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[OR15]]
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR14]](s32), [[OR17]](s32)
; CI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p0) :: (load (s8) from unknown-address + 24)
- ; CI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p0) :: (load (s8) from unknown-address + 25)
; CI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; CI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD18]]
- ; CI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p0) :: (load (s8) from unknown-address + 26)
- ; CI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load (s8) from unknown-address + 27)
; CI-NEXT: [[SHL19:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; CI-NEXT: [[OR19:%[0-9]+]]:_(s32) = G_OR [[SHL19]], [[ZEXTLOAD20]]
; CI-NEXT: [[SHL20:%[0-9]+]]:_(s32) = G_SHL [[OR19]], [[C3]](s32)
; CI-NEXT: [[OR20:%[0-9]+]]:_(s32) = G_OR [[SHL20]], [[OR18]]
- ; CI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p0) :: (load (s8) from unknown-address + 28)
- ; CI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p0) :: (load (s8) from unknown-address + 29)
; CI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; CI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD21]]
- ; CI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p0) :: (load (s8) from unknown-address + 30)
- ; CI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load (s8) from unknown-address + 31)
; CI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; CI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD23]]
@@ -11210,15 +11210,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11226,15 +11226,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11242,29 +11242,29 @@ body: |
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -11272,29 +11272,29 @@ body: |
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR8]](s32), [[OR11]](s32)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -11302,29 +11302,29 @@ body: |
; VI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[OR15]]
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR14]](s32), [[OR17]](s32)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p0) :: (load (s8) from unknown-address + 24)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p0) :: (load (s8) from unknown-address + 25)
; VI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; VI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD18]]
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p0) :: (load (s8) from unknown-address + 26)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load (s8) from unknown-address + 27)
; VI-NEXT: [[SHL19:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; VI-NEXT: [[OR19:%[0-9]+]]:_(s32) = G_OR [[SHL19]], [[ZEXTLOAD20]]
; VI-NEXT: [[SHL20:%[0-9]+]]:_(s32) = G_SHL [[OR19]], [[C3]](s32)
; VI-NEXT: [[OR20:%[0-9]+]]:_(s32) = G_OR [[SHL20]], [[OR18]]
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p0) :: (load (s8) from unknown-address + 28)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p0) :: (load (s8) from unknown-address + 29)
; VI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; VI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD21]]
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p0) :: (load (s8) from unknown-address + 30)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load (s8) from unknown-address + 31)
; VI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; VI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD23]]
@@ -11340,7 +11340,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 1)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11351,7 +11351,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 1)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11PLUS-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11362,7 +11362,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 1)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 1)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -11373,15 +11373,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11390,15 +11390,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11409,30 +11409,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX9PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11444,30 +11444,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[OR6]](s64), [[OR13]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -11477,30 +11477,30 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C5]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p0) :: (load (s8) from unknown-address + 24)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p0) :: (load (s8) from unknown-address + 25)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p0) :: (load (s8) from unknown-address + 26)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load (s8) from unknown-address + 27)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p0) :: (load (s8) from unknown-address + 28)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p0) :: (load (s8) from unknown-address + 29)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p0) :: (load (s8) from unknown-address + 30)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load (s8) from unknown-address + 31)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -11520,15 +11520,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11537,15 +11537,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11556,30 +11556,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX11PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11591,30 +11591,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX11PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[OR6]](s64), [[OR13]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -11624,30 +11624,30 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C5]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p0) :: (load (s8) from unknown-address + 24)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p0) :: (load (s8) from unknown-address + 25)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p0) :: (load (s8) from unknown-address + 26)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load (s8) from unknown-address + 27)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p0) :: (load (s8) from unknown-address + 28)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p0) :: (load (s8) from unknown-address + 29)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p0) :: (load (s8) from unknown-address + 30)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load (s8) from unknown-address + 31)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -11667,15 +11667,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11684,15 +11684,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11703,30 +11703,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; UNALIGNED_GFX12-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX12-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX12-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11738,30 +11738,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[OR6]](s64), [[OR13]](s64)
; UNALIGNED_GFX12-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p0) :: (load (s8) from unknown-address + 16)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p0) :: (load (s8) from unknown-address + 17)
; UNALIGNED_GFX12-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p0) :: (load (s8) from unknown-address + 18)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load (s8) from unknown-address + 19)
; UNALIGNED_GFX12-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; UNALIGNED_GFX12-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; UNALIGNED_GFX12-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p0) :: (load (s8) from unknown-address + 20)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p0) :: (load (s8) from unknown-address + 21)
; UNALIGNED_GFX12-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p0) :: (load (s8) from unknown-address + 22)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load (s8) from unknown-address + 23)
; UNALIGNED_GFX12-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -11771,30 +11771,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; UNALIGNED_GFX12-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p0) :: (load (s8) from unknown-address + 24)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p0) :: (load (s8) from unknown-address + 25)
; UNALIGNED_GFX12-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p0) :: (load (s8) from unknown-address + 26)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load (s8) from unknown-address + 27)
; UNALIGNED_GFX12-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; UNALIGNED_GFX12-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; UNALIGNED_GFX12-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p0) :: (load (s8) from unknown-address + 28)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p0) :: (load (s8) from unknown-address + 29)
; UNALIGNED_GFX12-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p0) :: (load (s8) from unknown-address + 30)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load (s8) from unknown-address + 31)
; UNALIGNED_GFX12-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -11824,25 +11824,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -11854,25 +11854,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 32)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 16, align 16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 20)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 24, align 8)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s32) from unknown-address + 28)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -11884,7 +11884,7 @@ body: |
; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11896,7 +11896,7 @@ body: |
; GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11908,7 +11908,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11920,7 +11920,7 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX9PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11932,7 +11932,7 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX11PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11944,7 +11944,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -11966,13 +11966,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -11984,13 +11984,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 16)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -12060,13 +12060,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -12078,13 +12078,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -12154,13 +12154,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -12172,13 +12172,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -12248,15 +12248,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12264,45 +12264,45 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12318,15 +12318,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12334,45 +12334,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12412,15 +12412,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12428,45 +12428,45 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX9PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12482,15 +12482,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12498,45 +12498,45 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX11PLUS-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11PLUS-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11PLUS-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12552,15 +12552,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12568,45 +12568,45 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12632,7 +12632,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -12644,7 +12644,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -12714,7 +12714,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -12726,7 +12726,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -12796,15 +12796,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12812,15 +12812,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12836,15 +12836,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12852,15 +12852,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12900,15 +12900,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12916,15 +12916,15 @@ body: |
; UNALIGNED_GFX9PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX9PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX9PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX9PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12940,15 +12940,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX11PLUS-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX11PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11PLUS-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12956,15 +12956,15 @@ body: |
; UNALIGNED_GFX11PLUS-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11PLUS-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX11PLUS-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX11PLUS-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX11PLUS-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11PLUS-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12980,15 +12980,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12996,15 +12996,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir
index 2cc66996..1b72ce5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir
@@ -381,7 +381,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -401,7 +401,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -414,7 +414,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -434,7 +434,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -510,7 +510,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -530,7 +530,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -543,7 +543,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -563,7 +563,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -586,15 +586,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -616,15 +616,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -639,15 +639,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -669,15 +669,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -808,7 +808,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -821,7 +821,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -834,7 +834,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -847,7 +847,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -860,7 +860,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -873,7 +873,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -897,13 +897,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -916,7 +916,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -929,13 +929,13 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -948,13 +948,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -967,7 +967,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -980,13 +980,13 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1179,16 +1179,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1211,16 +1211,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1236,16 +1236,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1268,16 +1268,16 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1303,15 +1303,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1320,15 +1320,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1353,15 +1353,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1370,15 +1370,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1396,15 +1396,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1413,15 +1413,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1446,15 +1446,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1463,15 +1463,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -1559,7 +1559,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, align 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -1622,7 +1622,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -1685,22 +1685,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1722,22 +1722,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1751,22 +1751,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1788,22 +1788,22 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -1827,15 +1827,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1843,30 +1843,30 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -1890,15 +1890,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1906,30 +1906,30 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -1945,15 +1945,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1961,30 +1961,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2008,15 +2008,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2024,30 +2024,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2073,7 +2073,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2086,7 +2086,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2099,7 +2099,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2112,7 +2112,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2125,7 +2125,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; GFX9-HSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2138,7 +2138,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
; GFX9-MESA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[LOAD1]](s32)
@@ -2161,10 +2161,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s32>) from unknown-address + 16, align 4, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32) from unknown-address + 24, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<2 x s32>)
; SI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
@@ -2180,7 +2180,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-HSA-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2196,7 +2196,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CI-MESA-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2212,7 +2212,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; VI-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2228,7 +2228,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-HSA-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2244,7 +2244,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; GFX9-MESA-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
@@ -2389,15 +2389,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2405,45 +2405,45 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2467,15 +2467,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2483,45 +2483,45 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2537,15 +2537,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2553,45 +2553,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2615,15 +2615,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2631,45 +2631,45 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -2857,15 +2857,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2874,15 +2874,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2908,15 +2908,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2925,15 +2925,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2952,15 +2952,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2969,15 +2969,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3003,15 +3003,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3020,15 +3020,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3213,16 +3213,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3246,16 +3246,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3272,16 +3272,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3305,16 +3305,16 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -3341,15 +3341,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3358,15 +3358,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3392,15 +3392,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3409,15 +3409,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3436,15 +3436,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3453,15 +3453,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3487,15 +3487,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3504,15 +3504,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -3593,7 +3593,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3614,7 +3614,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3628,7 +3628,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3649,7 +3649,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3673,15 +3673,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3704,15 +3704,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3728,15 +3728,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3759,15 +3759,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3901,7 +3901,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3921,7 +3921,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3934,7 +3934,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -3954,7 +3954,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4178,13 +4178,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -4220,7 +4220,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4257,13 +4257,13 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -4299,13 +4299,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -4339,7 +4339,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4374,13 +4374,13 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -4538,7 +4538,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4573,7 +4573,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4593,7 +4593,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4634,7 +4634,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -4670,15 +4670,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4714,15 +4714,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4743,15 +4743,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4793,15 +4793,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5050,7 +5050,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -5074,7 +5074,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-MESA-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -5091,7 +5091,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -5116,7 +5116,7 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -5138,15 +5138,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5172,15 +5172,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5199,15 +5199,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5233,16 +5233,16 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5426,10 +5426,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5461,10 +5461,10 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5496,10 +5496,10 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5531,10 +5531,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5567,11 +5567,11 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5596,11 +5596,11 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5636,10 +5636,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5671,10 +5671,10 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5706,10 +5706,10 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5741,10 +5741,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5777,11 +5777,11 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5806,11 +5806,11 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5846,22 +5846,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -5895,10 +5895,10 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -5930,22 +5930,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -5979,22 +5979,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -6029,11 +6029,11 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -6057,24 +6057,24 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -6216,13 +6216,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -6252,13 +6252,13 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-MESA-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -6281,13 +6281,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -6318,15 +6318,15 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6350,29 +6350,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -6404,29 +6404,29 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -6451,29 +6451,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -6505,32 +6505,32 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -6700,7 +6700,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (<4 x s16>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
@@ -6723,16 +6723,16 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -6764,16 +6764,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -6805,16 +6805,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -6847,19 +6847,19 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6880,19 +6880,19 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -6928,7 +6928,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (<4 x s16>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
@@ -6951,16 +6951,16 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -6992,16 +6992,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7033,16 +7033,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7075,19 +7075,19 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7108,19 +7108,19 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7156,16 +7156,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7197,16 +7197,16 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7238,16 +7238,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7279,16 +7279,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7321,19 +7321,19 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7354,19 +7354,19 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7402,36 +7402,36 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
@@ -7465,16 +7465,16 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, align 1, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 1, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<6 x s16>)
@@ -7506,36 +7506,36 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
@@ -7569,36 +7569,36 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
@@ -7633,19 +7633,19 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -7665,40 +7665,40 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[OR3]](s32)
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
@@ -7796,7 +7796,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, align 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -7859,7 +7859,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -7922,22 +7922,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7959,22 +7959,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -7988,22 +7988,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -8025,22 +8025,22 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -8064,15 +8064,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8080,30 +8080,30 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -8127,15 +8127,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8143,30 +8143,30 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -8182,15 +8182,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8198,30 +8198,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -8245,15 +8245,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8261,30 +8261,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -8463,22 +8463,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8516,22 +8516,22 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; CI-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8569,22 +8569,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8622,22 +8622,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8676,27 +8676,27 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -8719,27 +8719,27 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -8778,22 +8778,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8831,22 +8831,22 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; CI-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8884,22 +8884,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8937,22 +8937,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -8991,27 +8991,27 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9034,27 +9034,27 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9093,22 +9093,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -9146,22 +9146,22 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; CI-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -9199,22 +9199,22 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -9252,22 +9252,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -9306,27 +9306,27 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9349,27 +9349,27 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
; GFX9-MESA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9408,50 +9408,50 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
@@ -9491,22 +9491,22 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; CI-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, align 1, addrspace 1)
; CI-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 1, addrspace 1)
; CI-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, align 1, addrspace 1)
; CI-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 1, addrspace 1)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<8 x s16>)
@@ -9544,50 +9544,50 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
@@ -9627,50 +9627,50 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
@@ -9711,27 +9711,27 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-HSA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-HSA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-HSA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-HSA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-HSA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32)
; GFX9-HSA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-HSA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32)
; GFX9-HSA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-HSA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32)
; GFX9-HSA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9753,56 +9753,56 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-MESA-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
; GFX9-MESA-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[OR3]](s32)
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
; GFX9-MESA-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[OR4]](s32)
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[OR5]](s32)
; GFX9-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
@@ -10063,15 +10063,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10091,15 +10091,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10112,15 +10112,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10140,15 +10140,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10171,15 +10171,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10187,15 +10187,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10217,15 +10217,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10233,15 +10233,15 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10256,15 +10256,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10272,15 +10272,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10302,15 +10302,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -10318,15 +10318,15 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -10405,7 +10405,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -10878,16 +10878,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10896,16 +10896,16 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; SI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -10929,16 +10929,16 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10947,16 +10947,16 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; CI-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -10973,16 +10973,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10991,16 +10991,16 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -11024,16 +11024,16 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11042,16 +11042,16 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[ZEXT]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
; GFX9-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -11078,15 +11078,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11095,15 +11095,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11114,30 +11114,30 @@ body: |
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; SI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11163,15 +11163,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11180,15 +11180,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11199,30 +11199,30 @@ body: |
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11241,15 +11241,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11258,15 +11258,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11277,30 +11277,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11326,15 +11326,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11343,15 +11343,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11362,30 +11362,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11550,7 +11550,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11564,7 +11564,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11578,7 +11578,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; CI-MESA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CI-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11592,7 +11592,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; VI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11606,7 +11606,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; GFX9-HSA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11620,7 +11620,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
; GFX9-MESA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-MESA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11646,15 +11646,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11663,15 +11663,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11682,30 +11682,30 @@ body: |
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; SI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11716,30 +11716,30 @@ body: |
; SI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; SI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; SI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; SI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; SI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; SI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; SI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; SI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; SI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; SI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; SI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -11760,7 +11760,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, align 1, addrspace 1)
; CI-HSA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CI-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -11774,15 +11774,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11791,15 +11791,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11810,30 +11810,30 @@ body: |
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11844,30 +11844,30 @@ body: |
; CI-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; CI-MESA-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; CI-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; CI-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; CI-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; CI-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; CI-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; CI-MESA-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; CI-MESA-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-MESA-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; CI-MESA-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-MESA-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -11888,15 +11888,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11905,15 +11905,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11924,30 +11924,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -11958,30 +11958,30 @@ body: |
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; VI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; VI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; VI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12002,7 +12002,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-HSA-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -12016,15 +12016,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12033,15 +12033,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12052,30 +12052,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -12086,30 +12086,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; GFX9-MESA-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; GFX9-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; GFX9-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; GFX9-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; GFX9-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; GFX9-MESA-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; GFX9-MESA-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; GFX9-MESA-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12246,15 +12246,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12263,15 +12263,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12282,30 +12282,30 @@ body: |
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; SI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -12316,30 +12316,30 @@ body: |
; SI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; SI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; SI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; SI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; SI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; SI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; SI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; SI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; SI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; SI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; SI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12350,30 +12350,30 @@ body: |
; SI-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; SI-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p1) :: (load (s8) from unknown-address + 24, addrspace 1)
- ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p1) :: (load (s8) from unknown-address + 25, addrspace 1)
; SI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; SI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p1) :: (load (s8) from unknown-address + 26, addrspace 1)
- ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load (s8) from unknown-address + 27, addrspace 1)
; SI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; SI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; SI-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; SI-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; SI-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p1) :: (load (s8) from unknown-address + 28, addrspace 1)
- ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p1) :: (load (s8) from unknown-address + 29, addrspace 1)
; SI-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; SI-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p1) :: (load (s8) from unknown-address + 30, addrspace 1)
- ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load (s8) from unknown-address + 31, addrspace 1)
; SI-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; SI-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -12399,15 +12399,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12416,15 +12416,15 @@ body: |
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12435,30 +12435,30 @@ body: |
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -12469,30 +12469,30 @@ body: |
; CI-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; CI-MESA-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; CI-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; CI-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; CI-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; CI-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; CI-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; CI-MESA-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; CI-MESA-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-MESA-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; CI-MESA-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-MESA-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12503,30 +12503,30 @@ body: |
; CI-MESA-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; CI-MESA-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; CI-MESA-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; CI-MESA-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p1) :: (load (s8) from unknown-address + 24, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p1) :: (load (s8) from unknown-address + 25, addrspace 1)
; CI-MESA-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; CI-MESA-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; CI-MESA-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p1) :: (load (s8) from unknown-address + 26, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load (s8) from unknown-address + 27, addrspace 1)
; CI-MESA-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; CI-MESA-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; CI-MESA-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; CI-MESA-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; CI-MESA-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; CI-MESA-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p1) :: (load (s8) from unknown-address + 28, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p1) :: (load (s8) from unknown-address + 29, addrspace 1)
; CI-MESA-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; CI-MESA-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; CI-MESA-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p1) :: (load (s8) from unknown-address + 30, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load (s8) from unknown-address + 31, addrspace 1)
; CI-MESA-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -12545,15 +12545,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12562,15 +12562,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12581,30 +12581,30 @@ body: |
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -12615,30 +12615,30 @@ body: |
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; VI-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; VI-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; VI-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12649,30 +12649,30 @@ body: |
; VI-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; VI-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; VI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; VI-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p1) :: (load (s8) from unknown-address + 24, addrspace 1)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p1) :: (load (s8) from unknown-address + 25, addrspace 1)
; VI-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; VI-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p1) :: (load (s8) from unknown-address + 26, addrspace 1)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load (s8) from unknown-address + 27, addrspace 1)
; VI-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; VI-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; VI-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; VI-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; VI-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p1) :: (load (s8) from unknown-address + 28, addrspace 1)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p1) :: (load (s8) from unknown-address + 29, addrspace 1)
; VI-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; VI-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p1) :: (load (s8) from unknown-address + 30, addrspace 1)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load (s8) from unknown-address + 31, addrspace 1)
; VI-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; VI-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -12698,15 +12698,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12715,15 +12715,15 @@ body: |
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12734,30 +12734,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-MESA-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -12768,30 +12768,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[COPY1]](s32)
; GFX9-MESA-NEXT: [[OR13:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[ZEXT1]]
; GFX9-MESA-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; GFX9-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[ZEXTLOAD12]]
- ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; GFX9-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD14]]
; GFX9-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[OR15]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[OR14]]
; GFX9-MESA-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR16]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; GFX9-MESA-NEXT: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR17:%[0-9]+]]:_(s32) = G_OR [[SHL17]], [[ZEXTLOAD15]]
- ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; GFX9-MESA-NEXT: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR18:%[0-9]+]]:_(s32) = G_OR [[SHL18]], [[ZEXTLOAD17]]
@@ -12802,30 +12802,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[COPY2]](s32)
; GFX9-MESA-NEXT: [[OR20:%[0-9]+]]:_(s64) = G_OR [[SHL20]], [[ZEXT2]]
; GFX9-MESA-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; GFX9-MESA-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD18:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD23]](p1) :: (load (s8) from unknown-address + 24, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD19:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD24]](p1) :: (load (s8) from unknown-address + 25, addrspace 1)
; GFX9-MESA-NEXT: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD19]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR21:%[0-9]+]]:_(s32) = G_OR [[SHL21]], [[ZEXTLOAD18]]
- ; GFX9-MESA-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD20:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD25]](p1) :: (load (s8) from unknown-address + 26, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD25]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load (s8) from unknown-address + 27, addrspace 1)
; GFX9-MESA-NEXT: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[LOAD6]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR22:%[0-9]+]]:_(s32) = G_OR [[SHL22]], [[ZEXTLOAD20]]
; GFX9-MESA-NEXT: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[OR22]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR23:%[0-9]+]]:_(s32) = G_OR [[SHL23]], [[OR21]]
; GFX9-MESA-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR23]](s32)
- ; GFX9-MESA-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD21:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD27]](p1) :: (load (s8) from unknown-address + 28, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD22:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD28]](p1) :: (load (s8) from unknown-address + 29, addrspace 1)
; GFX9-MESA-NEXT: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD22]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR24:%[0-9]+]]:_(s32) = G_OR [[SHL24]], [[ZEXTLOAD21]]
- ; GFX9-MESA-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD23:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD29]](p1) :: (load (s8) from unknown-address + 30, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD29]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load (s8) from unknown-address + 31, addrspace 1)
; GFX9-MESA-NEXT: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[LOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR25:%[0-9]+]]:_(s32) = G_OR [[SHL25]], [[ZEXTLOAD23]]
@@ -13086,15 +13086,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13102,45 +13102,45 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13164,15 +13164,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13180,45 +13180,45 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13234,15 +13234,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13250,45 +13250,45 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13312,15 +13312,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13328,45 +13328,45 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-MESA-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13566,15 +13566,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13582,15 +13582,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13614,15 +13614,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13630,15 +13630,15 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13654,15 +13654,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13670,15 +13670,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13702,15 +13702,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13718,15 +13718,15 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13856,13 +13856,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -13875,7 +13875,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -13888,13 +13888,13 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -13907,13 +13907,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -13926,7 +13926,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -13939,13 +13939,13 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -13967,7 +13967,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -13980,7 +13980,7 @@ body: |
; CI-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; CI-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -13993,7 +13993,7 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -14006,7 +14006,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -14019,7 +14019,7 @@ body: |
; GFX9-HSA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-HSA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX9-HSA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-HSA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -14032,7 +14032,7 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -14734,15 +14734,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14750,30 +14750,30 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -14782,43 +14782,43 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; SI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; SI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; SI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; SI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; SI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; SI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; SI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; SI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; SI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; SI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -14838,7 +14838,7 @@ body: |
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 1, addrspace 1)
; CI-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 1)
; CI-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -14852,15 +14852,15 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14868,30 +14868,30 @@ body: |
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; CI-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; CI-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -14900,43 +14900,43 @@ body: |
; CI-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; CI-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; CI-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; CI-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; CI-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; CI-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; CI-MESA-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-MESA-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; CI-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; CI-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; CI-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -14955,15 +14955,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14971,30 +14971,30 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -15003,43 +15003,43 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -15059,7 +15059,7 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 1, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15073,15 +15073,15 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s8) from unknown-address + 2, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s8) from unknown-address + 3, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15089,30 +15089,30 @@ body: |
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s8) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p1) :: (load (s8) from unknown-address + 5, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s8) from unknown-address + 6, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s8) from unknown-address + 7, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; GFX9-MESA-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s8) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p1) :: (load (s8) from unknown-address + 9, addrspace 1)
; GFX9-MESA-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s8) from unknown-address + 10, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s8) from unknown-address + 11, addrspace 1)
; GFX9-MESA-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -15121,43 +15121,43 @@ body: |
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; GFX9-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-MESA-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p1) :: (load (s8) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p1) :: (load (s8) from unknown-address + 13, addrspace 1)
; GFX9-MESA-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p1) :: (load (s8) from unknown-address + 14, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load (s8) from unknown-address + 15, addrspace 1)
; GFX9-MESA-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; GFX9-MESA-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p1) :: (load (s8) from unknown-address + 16, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p1) :: (load (s8) from unknown-address + 17, addrspace 1)
; GFX9-MESA-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p1) :: (load (s8) from unknown-address + 18, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load (s8) from unknown-address + 19, addrspace 1)
; GFX9-MESA-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; GFX9-MESA-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; GFX9-MESA-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p1) :: (load (s8) from unknown-address + 20, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p1) :: (load (s8) from unknown-address + 21, addrspace 1)
; GFX9-MESA-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p1) :: (load (s8) from unknown-address + 22, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load (s8) from unknown-address + 23, addrspace 1)
; GFX9-MESA-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -15189,43 +15189,43 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s16) from unknown-address + 18, addrspace 1)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s16) from unknown-address + 22, addrspace 1)
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -15243,7 +15243,7 @@ body: |
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 2, addrspace 1)
; CI-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 1)
; CI-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15257,43 +15257,43 @@ body: |
; CI-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CI-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; CI-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; CI-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; CI-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; CI-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; CI-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s16) from unknown-address + 18, addrspace 1)
; CI-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; CI-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s16) from unknown-address + 22, addrspace 1)
; CI-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -15310,43 +15310,43 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s16) from unknown-address + 18, addrspace 1)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s16) from unknown-address + 22, addrspace 1)
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -15364,7 +15364,7 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 2, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15378,43 +15378,43 @@ body: |
; GFX9-MESA-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX9-MESA-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 2, addrspace 1)
; GFX9-MESA-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-MESA-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-MESA-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p1) :: (load (s16) from unknown-address + 4, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s16) from unknown-address + 6, addrspace 1)
; GFX9-MESA-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-MESA-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p1) :: (load (s16) from unknown-address + 8, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load (s16) from unknown-address + 10, addrspace 1)
; GFX9-MESA-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-MESA-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; GFX9-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-MESA-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p1) :: (load (s16) from unknown-address + 12, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load (s16) from unknown-address + 14, addrspace 1)
; GFX9-MESA-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p1) :: (load (s16) from unknown-address + 16, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load (s16) from unknown-address + 18, addrspace 1)
; GFX9-MESA-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; GFX9-MESA-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p1) :: (load (s16) from unknown-address + 20, addrspace 1)
- ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load (s16) from unknown-address + 22, addrspace 1)
; GFX9-MESA-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-MESA-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -15444,15 +15444,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 1)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32) from unknown-address + 20, addrspace 1)
; SI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32), [[LOAD3]](s32)
@@ -15469,7 +15469,7 @@ body: |
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 4, addrspace 1)
; CI-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15484,7 +15484,7 @@ body: |
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 4, addrspace 1)
; CI-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-MESA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-MESA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15499,7 +15499,7 @@ body: |
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 4, addrspace 1)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15514,7 +15514,7 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 4, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15529,7 +15529,7 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 4, addrspace 1)
; GFX9-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-MESA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15559,10 +15559,10 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32) from unknown-address + 20, addrspace 1)
; SI-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[LOAD2]](s32)
@@ -15579,7 +15579,7 @@ body: |
; CI-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 16, addrspace 1)
; CI-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15594,7 +15594,7 @@ body: |
; CI-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 16, addrspace 1)
; CI-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; CI-MESA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; CI-MESA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15609,7 +15609,7 @@ body: |
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 16, addrspace 1)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15624,7 +15624,7 @@ body: |
; GFX9-HSA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 16, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-HSA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-HSA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-HSA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-HSA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-HSA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -15639,7 +15639,7 @@ body: |
; GFX9-MESA-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p1) :: (load (<3 x s32>), align 16, addrspace 1)
; GFX9-MESA-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-MESA-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-MESA-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-MESA-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 1)
; GFX9-MESA-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-MESA-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
index 3ec2e15..ff43b07 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
@@ -550,7 +550,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -563,7 +563,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -576,7 +576,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -589,7 +589,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -602,7 +602,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -622,7 +622,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -642,7 +642,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -753,7 +753,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -766,7 +766,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -779,7 +779,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -792,7 +792,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -805,7 +805,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -825,7 +825,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -845,7 +845,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -875,15 +875,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -898,15 +898,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -921,15 +921,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -944,15 +944,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -967,15 +967,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -997,15 +997,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1027,15 +1027,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1229,7 +1229,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1242,7 +1242,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1255,7 +1255,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1268,7 +1268,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1281,7 +1281,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1294,7 +1294,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1307,7 +1307,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1320,7 +1320,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1333,7 +1333,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1346,7 +1346,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, align 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1370,13 +1370,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1389,13 +1389,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1408,13 +1408,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1427,13 +1427,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1446,13 +1446,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1465,7 +1465,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1478,13 +1478,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1497,7 +1497,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1510,13 +1510,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1529,7 +1529,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1794,16 +1794,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1819,16 +1819,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1844,16 +1844,16 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1869,16 +1869,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1894,16 +1894,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1926,16 +1926,16 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -1952,7 +1952,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -1966,16 +1966,16 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2008,15 +2008,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2025,15 +2025,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2051,15 +2051,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2068,15 +2068,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2094,15 +2094,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2111,15 +2111,15 @@ body: |
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2137,15 +2137,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2154,15 +2154,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2180,15 +2180,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2197,15 +2197,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2230,15 +2230,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2247,15 +2247,15 @@ body: |
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2274,7 +2274,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -2288,15 +2288,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2305,15 +2305,15 @@ body: |
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2348,15 +2348,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2364,29 +2364,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2402,15 +2402,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2418,29 +2418,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2456,15 +2456,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2472,29 +2472,29 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2510,15 +2510,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2526,29 +2526,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2564,15 +2564,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2580,29 +2580,29 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2626,15 +2626,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2642,29 +2642,29 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2680,10 +2680,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2695,15 +2695,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2711,29 +2711,29 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2767,7 +2767,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -2780,7 +2780,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -2793,10 +2793,10 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2808,10 +2808,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2823,10 +2823,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2846,10 +2846,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2861,10 +2861,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2876,10 +2876,10 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2909,7 +2909,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -2922,7 +2922,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -2935,10 +2935,10 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2950,10 +2950,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2965,10 +2965,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2988,10 +2988,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3003,10 +3003,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3018,10 +3018,10 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3051,22 +3051,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3080,22 +3080,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3109,22 +3109,22 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3138,22 +3138,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3167,22 +3167,22 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3204,22 +3204,22 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3233,10 +3233,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3248,22 +3248,22 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3295,15 +3295,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3311,29 +3311,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3349,15 +3349,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3365,29 +3365,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3403,15 +3403,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3419,29 +3419,29 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3457,15 +3457,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3473,29 +3473,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3511,15 +3511,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3527,29 +3527,29 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3573,15 +3573,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3589,29 +3589,29 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3627,10 +3627,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3642,15 +3642,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3658,29 +3658,29 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3714,15 +3714,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3730,44 +3730,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3784,15 +3784,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3800,44 +3800,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3854,15 +3854,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3870,44 +3870,44 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-DS128-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-DS128-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3923,15 +3923,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3939,44 +3939,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -3992,15 +3992,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4008,44 +4008,44 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4069,15 +4069,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4085,44 +4085,44 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4138,13 +4138,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4156,15 +4156,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4172,44 +4172,44 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4243,7 +4243,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -4255,7 +4255,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -4299,13 +4299,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4317,13 +4317,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4361,7 +4361,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -4373,7 +4373,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -4385,13 +4385,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4403,13 +4403,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4421,13 +4421,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4447,13 +4447,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4465,13 +4465,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4483,13 +4483,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4519,29 +4519,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4556,29 +4556,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4593,29 +4593,29 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4629,29 +4629,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4665,29 +4665,29 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4709,29 +4709,29 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4745,13 +4745,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4763,29 +4763,29 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4817,15 +4817,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4833,44 +4833,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4887,15 +4887,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4903,44 +4903,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4957,15 +4957,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4973,44 +4973,44 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-DS128-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-DS128-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5026,15 +5026,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5042,44 +5042,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5095,15 +5095,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5111,44 +5111,44 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5172,15 +5172,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5188,44 +5188,44 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5241,13 +5241,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -5259,15 +5259,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5275,44 +5275,44 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5427,7 +5427,7 @@ body: |
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; SI-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -5478,7 +5478,7 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -5494,7 +5494,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -5533,16 +5533,16 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5559,16 +5559,16 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5585,16 +5585,16 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5611,16 +5611,16 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5637,16 +5637,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5670,16 +5670,16 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5697,7 +5697,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -5712,16 +5712,16 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -5755,15 +5755,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5772,15 +5772,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5799,15 +5799,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5816,15 +5816,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5843,15 +5843,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5860,15 +5860,15 @@ body: |
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5887,15 +5887,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5904,15 +5904,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5931,15 +5931,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5948,15 +5948,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -5982,15 +5982,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5999,15 +5999,15 @@ body: |
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6027,7 +6027,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
@@ -6042,15 +6042,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6059,15 +6059,15 @@ body: |
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6183,7 +6183,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6197,7 +6197,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6211,7 +6211,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6225,7 +6225,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6239,7 +6239,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6260,7 +6260,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6281,7 +6281,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6312,15 +6312,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6336,15 +6336,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6360,15 +6360,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6384,15 +6384,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6408,15 +6408,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6439,15 +6439,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6470,15 +6470,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6591,7 +6591,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6605,7 +6605,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6619,7 +6619,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6633,7 +6633,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6647,7 +6647,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6668,7 +6668,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6689,7 +6689,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6720,15 +6720,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6744,15 +6744,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6768,15 +6768,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6792,15 +6792,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6816,15 +6816,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6847,15 +6847,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6878,15 +6878,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7001,7 +7001,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7016,7 +7016,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7031,7 +7031,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7046,7 +7046,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7061,7 +7061,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7086,7 +7086,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7111,7 +7111,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7465,13 +7465,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7507,13 +7507,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7549,13 +7549,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7591,13 +7591,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7631,13 +7631,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7671,7 +7671,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7706,13 +7706,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7746,7 +7746,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7781,13 +7781,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -7821,7 +7821,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -8030,15 +8030,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8046,43 +8046,43 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8097,15 +8097,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8113,43 +8113,43 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8164,15 +8164,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8180,44 +8180,44 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-DS128-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-DS128-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8232,15 +8232,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8248,44 +8248,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8300,15 +8300,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8316,44 +8316,44 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8375,15 +8375,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8391,44 +8391,44 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8443,13 +8443,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8460,15 +8460,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8476,44 +8476,44 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8626,7 +8626,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -8643,7 +8643,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -8660,7 +8660,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-DS128-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -8677,7 +8677,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -8695,7 +8695,7 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -8715,7 +8715,7 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -8735,7 +8735,7 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -8764,15 +8764,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8791,15 +8791,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8818,15 +8818,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8845,15 +8845,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8872,16 +8872,16 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8902,16 +8902,16 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -8932,16 +8932,16 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9220,10 +9220,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9255,10 +9255,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9290,10 +9290,10 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-DS128-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9325,10 +9325,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9361,11 +9361,11 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9390,11 +9390,11 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9419,11 +9419,11 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9448,11 +9448,11 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9477,11 +9477,11 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9506,11 +9506,11 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9546,22 +9546,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9595,22 +9595,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9644,22 +9644,22 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9693,22 +9693,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9742,24 +9742,24 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9786,11 +9786,11 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9814,24 +9814,24 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9858,11 +9858,11 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -9886,24 +9886,24 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -9930,11 +9930,11 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10049,13 +10049,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 4, addrspace 3)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -10114,15 +10114,15 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 4, addrspace 3)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 4, addrspace 3)
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10137,15 +10137,15 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10182,13 +10182,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -10211,13 +10211,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -10240,13 +10240,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-DS128-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -10269,13 +10269,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C3]]
@@ -10299,15 +10299,15 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10329,15 +10329,15 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10352,15 +10352,15 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10375,15 +10375,15 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10414,29 +10414,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10461,29 +10461,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10508,29 +10508,29 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10555,29 +10555,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10602,32 +10602,32 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10650,32 +10650,32 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10692,15 +10692,15 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s16), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -10714,32 +10714,32 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -10932,15 +10932,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10953,15 +10953,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10974,15 +10974,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -10995,15 +10995,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11016,15 +11016,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11044,15 +11044,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11065,7 +11065,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11076,15 +11076,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11114,15 +11114,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11130,15 +11130,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11153,15 +11153,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11169,15 +11169,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11192,15 +11192,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11208,15 +11208,15 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11231,15 +11231,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11247,15 +11247,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11270,15 +11270,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11286,15 +11286,15 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11316,15 +11316,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11332,15 +11332,15 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11355,7 +11355,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11366,15 +11366,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11382,15 +11382,15 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11422,15 +11422,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11438,29 +11438,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11475,15 +11475,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11491,29 +11491,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11528,15 +11528,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11544,29 +11544,29 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11581,15 +11581,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11597,29 +11597,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11634,15 +11634,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11650,29 +11650,29 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11694,15 +11694,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11710,29 +11710,29 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11747,10 +11747,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11761,15 +11761,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11777,29 +11777,29 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -11831,7 +11831,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -11843,7 +11843,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -11855,10 +11855,10 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11869,10 +11869,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11883,10 +11883,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11904,10 +11904,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11918,10 +11918,10 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11932,10 +11932,10 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -11963,7 +11963,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -11974,7 +11974,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -12051,7 +12051,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -12062,7 +12062,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -12101,13 +12101,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12118,13 +12118,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12159,7 +12159,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -12170,7 +12170,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
@@ -12181,13 +12181,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12198,13 +12198,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12215,13 +12215,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12239,13 +12239,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12256,13 +12256,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12273,13 +12273,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12307,29 +12307,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12343,29 +12343,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12379,29 +12379,29 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12414,29 +12414,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12449,29 +12449,29 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12491,29 +12491,29 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12526,13 +12526,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -12543,29 +12543,29 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -12595,15 +12595,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12611,44 +12611,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12664,15 +12664,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12680,44 +12680,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12733,15 +12733,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12749,44 +12749,44 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-DS128-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-DS128-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12801,15 +12801,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12817,44 +12817,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12869,15 +12869,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12885,44 +12885,44 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12944,15 +12944,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12960,44 +12960,44 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13012,13 +13012,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13029,15 +13029,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13045,44 +13045,44 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13114,13 +13114,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13131,13 +13131,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13148,7 +13148,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; CI-DS128-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13159,7 +13159,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13170,7 +13170,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13181,7 +13181,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX9-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13192,7 +13192,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13203,7 +13203,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13214,7 +13214,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13225,7 +13225,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -13246,25 +13246,25 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (<2 x s32>) from unknown-address + 32, align 32, addrspace 3)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (<2 x s32>) from unknown-address + 40, addrspace 3)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (<2 x s32>) from unknown-address + 48, align 16, addrspace 3)
; SI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (<2 x s32>) from unknown-address + 56, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13275,25 +13275,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (<2 x s32>) from unknown-address + 32, align 32, addrspace 3)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (<2 x s32>) from unknown-address + 40, addrspace 3)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (<2 x s32>) from unknown-address + 48, align 16, addrspace 3)
; CI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (<2 x s32>) from unknown-address + 56, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13304,13 +13304,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; CI-DS128-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13321,13 +13321,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13338,13 +13338,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13355,13 +13355,13 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX9-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13372,13 +13372,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13389,13 +13389,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13406,13 +13406,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13423,13 +13423,13 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX11-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -13450,7 +13450,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13461,7 +13461,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13472,7 +13472,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13483,7 +13483,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13494,7 +13494,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13512,7 +13512,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13523,7 +13523,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13534,7 +13534,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
@@ -13562,15 +13562,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13579,15 +13579,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13597,30 +13597,30 @@ body: |
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; SI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -13639,15 +13639,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13656,15 +13656,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13674,30 +13674,30 @@ body: |
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -13716,15 +13716,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13733,15 +13733,15 @@ body: |
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13751,30 +13751,30 @@ body: |
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; CI-DS128-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -13793,15 +13793,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13810,15 +13810,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13828,30 +13828,30 @@ body: |
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; VI-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -13870,15 +13870,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13887,15 +13887,15 @@ body: |
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13905,30 +13905,30 @@ body: |
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX9-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -13954,15 +13954,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13971,15 +13971,15 @@ body: |
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -13989,30 +13989,30 @@ body: |
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -14032,17 +14032,17 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX10-UNALIGNED-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ZEXT]]
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD2]](s32)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
@@ -14057,15 +14057,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14074,15 +14074,15 @@ body: |
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -14092,30 +14092,30 @@ body: |
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; GFX11-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -14151,10 +14151,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
@@ -14167,10 +14167,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
@@ -14183,7 +14183,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; CI-DS128-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CI-DS128-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14197,7 +14197,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; VI-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14211,7 +14211,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14225,7 +14225,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX9-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14239,7 +14239,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14253,7 +14253,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX10-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14267,7 +14267,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14281,7 +14281,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11-UNALIGNED-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -14307,13 +14307,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64), [[LOAD3]](s64)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>)
@@ -14324,13 +14324,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64), [[LOAD3]](s64)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>)
@@ -14341,7 +14341,7 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; CI-DS128-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; CI-DS128-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14352,7 +14352,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14363,7 +14363,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14374,7 +14374,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14385,7 +14385,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14396,7 +14396,7 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX10-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14407,7 +14407,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14418,7 +14418,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11-UNALIGNED-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -14439,7 +14439,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -14451,7 +14451,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
@@ -14463,13 +14463,13 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -14481,13 +14481,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -14499,13 +14499,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -14525,13 +14525,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -14543,13 +14543,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -14561,13 +14561,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -15818,15 +15818,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15834,29 +15834,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -15865,43 +15865,43 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; SI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; SI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; SI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; SI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; SI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; SI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; SI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; SI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -15920,15 +15920,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15936,29 +15936,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -15967,43 +15967,43 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; CI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16022,15 +16022,15 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -16038,29 +16038,29 @@ body: |
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; CI-DS128-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-DS128-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; CI-DS128-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -16069,43 +16069,43 @@ body: |
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-DS128-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; CI-DS128-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-DS128-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; CI-DS128-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; CI-DS128-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; CI-DS128-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; CI-DS128-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; CI-DS128-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-DS128-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-DS128-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; CI-DS128-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-DS128-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-DS128-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-DS128-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; CI-DS128-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-DS128-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-DS128-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; CI-DS128-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-DS128-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16124,15 +16124,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -16140,29 +16140,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -16171,43 +16171,43 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16226,15 +16226,15 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -16242,29 +16242,29 @@ body: |
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -16273,43 +16273,43 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; GFX9-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX9-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; GFX9-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; GFX9-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; GFX9-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; GFX9-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; GFX9-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; GFX9-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX9-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; GFX9-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16329,7 +16329,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -16343,15 +16343,15 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -16359,29 +16359,29 @@ body: |
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -16390,43 +16390,43 @@ body: |
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; GFX10-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; GFX10-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; GFX10-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; GFX10-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX10-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; GFX10-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; GFX10-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; GFX10-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; GFX10-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; GFX10-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; GFX10-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; GFX10-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; GFX10-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX10-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; GFX10-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; GFX10-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX10-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16445,19 +16445,19 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 1, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, align 1, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, align 1, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -16472,15 +16472,15 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s8), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p3) :: (load (s8) from unknown-address + 1, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s8) from unknown-address + 2, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s8) from unknown-address + 3, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -16488,29 +16488,29 @@ body: |
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s8) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p3) :: (load (s8) from unknown-address + 5, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s8) from unknown-address + 6, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s8) from unknown-address + 7, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s8) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p3) :: (load (s8) from unknown-address + 9, addrspace 3)
; GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s8) from unknown-address + 10, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s8) from unknown-address + 11, addrspace 3)
; GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -16519,43 +16519,43 @@ body: |
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p3) :: (load (s8) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p3) :: (load (s8) from unknown-address + 13, addrspace 3)
; GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p3) :: (load (s8) from unknown-address + 14, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load (s8) from unknown-address + 15, addrspace 3)
; GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; GFX11-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; GFX11-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; GFX11-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p3) :: (load (s8) from unknown-address + 16, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p3) :: (load (s8) from unknown-address + 17, addrspace 3)
; GFX11-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; GFX11-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; GFX11-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p3) :: (load (s8) from unknown-address + 18, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; GFX11-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load (s8) from unknown-address + 19, addrspace 3)
; GFX11-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; GFX11-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; GFX11-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; GFX11-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; GFX11-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p3) :: (load (s8) from unknown-address + 20, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; GFX11-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p3) :: (load (s8) from unknown-address + 21, addrspace 3)
; GFX11-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; GFX11-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; GFX11-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p3) :: (load (s8) from unknown-address + 22, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; GFX11-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load (s8) from unknown-address + 23, addrspace 3)
; GFX11-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX11-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -16575,7 +16575,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -16602,43 +16602,43 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16655,43 +16655,43 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16708,43 +16708,43 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-DS128-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; CI-DS128-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-DS128-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-DS128-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; CI-DS128-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-DS128-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-DS128-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; CI-DS128-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-DS128-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; CI-DS128-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-DS128-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; CI-DS128-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; CI-DS128-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-DS128-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16761,43 +16761,43 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16814,43 +16814,43 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16868,7 +16868,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -16882,43 +16882,43 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -16935,19 +16935,19 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, align 2, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, align 2, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, align 2, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -16962,43 +16962,43 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load (s16), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s16) from unknown-address + 2, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p3) :: (load (s16) from unknown-address + 4, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s16) from unknown-address + 6, addrspace 3)
; GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p3) :: (load (s16) from unknown-address + 8, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s16) from unknown-address + 10, addrspace 3)
; GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p3) :: (load (s16) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load (s16) from unknown-address + 14, addrspace 3)
; GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p3) :: (load (s16) from unknown-address + 16, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; GFX11-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load (s16) from unknown-address + 18, addrspace 3)
; GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p3) :: (load (s16) from unknown-address + 20, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; GFX11-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load (s16) from unknown-address + 22, addrspace 3)
; GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -17016,7 +17016,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -17043,15 +17043,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; SI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32), [[LOAD3]](s32)
@@ -17067,15 +17067,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32), [[LOAD3]](s32)
@@ -17091,19 +17091,19 @@ body: |
; CI-DS128-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; CI-DS128-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17118,19 +17118,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17145,19 +17145,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17173,7 +17173,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 4, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -17187,19 +17187,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17214,19 +17214,19 @@ body: |
; GFX10-UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17241,19 +17241,19 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32), addrspace 3)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 4, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX11-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
- ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX11-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -17269,7 +17269,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 4, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -17296,15 +17296,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; SI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32), [[LOAD3]](s32)
@@ -17320,15 +17320,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
; CI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32), [[LOAD3]](s32)
@@ -17345,13 +17345,13 @@ body: |
; CI-DS128-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; CI-DS128-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-DS128-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; CI-DS128-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; CI-DS128-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; CI-DS128-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; CI-DS128-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; CI-DS128-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI-DS128-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-DS128-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17367,13 +17367,13 @@ body: |
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17389,13 +17389,13 @@ body: |
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17411,7 +17411,7 @@ body: |
; GFX9-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX9-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX9-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -17426,13 +17426,13 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17448,13 +17448,13 @@ body: |
; GFX10-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX10-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; GFX10-UNALIGNED-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; GFX10-UNALIGNED-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10-UNALIGNED-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17470,13 +17470,13 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 12, addrspace 3)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 16, addrspace 3)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -17492,7 +17492,7 @@ body: |
; GFX11-UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load (<3 x s32>), align 16, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX11-UNALIGNED-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
index 1a0921b..7498def 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
@@ -599,7 +599,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -612,7 +612,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -625,7 +625,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -666,7 +666,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -679,7 +679,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -692,7 +692,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -705,7 +705,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -816,7 +816,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -829,7 +829,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -842,7 +842,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -883,7 +883,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -896,7 +896,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -909,7 +909,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -922,7 +922,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -945,15 +945,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -968,15 +968,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -991,15 +991,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1042,15 +1042,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1065,15 +1065,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1088,15 +1088,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1111,15 +1111,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -1320,7 +1320,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1333,7 +1333,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1346,7 +1346,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1359,7 +1359,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1372,7 +1372,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1385,7 +1385,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1398,7 +1398,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1411,7 +1411,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1424,7 +1424,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1437,7 +1437,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1450,7 +1450,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, align 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1474,13 +1474,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1493,13 +1493,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1512,13 +1512,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1531,7 +1531,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1544,7 +1544,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1557,7 +1557,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1570,7 +1570,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -1583,13 +1583,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1602,13 +1602,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1621,13 +1621,13 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1640,13 +1640,13 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -1670,7 +1670,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1691,7 +1691,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1712,7 +1712,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1733,7 +1733,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1754,7 +1754,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1789,7 +1789,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1810,7 +1810,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32)
@@ -1856,7 +1856,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1867,7 +1867,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1878,7 +1878,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1889,7 +1889,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1900,7 +1900,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1925,7 +1925,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1936,7 +1936,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1971,7 +1971,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1982,7 +1982,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -1993,7 +1993,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2004,7 +2004,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2015,7 +2015,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2040,7 +2040,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2051,7 +2051,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2086,15 +2086,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2107,15 +2107,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2128,15 +2128,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2149,7 +2149,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2160,7 +2160,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2185,15 +2185,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2206,15 +2206,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2227,16 +2227,16 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2252,16 +2252,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -2287,15 +2287,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2303,15 +2303,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2326,15 +2326,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2342,15 +2342,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2365,15 +2365,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2381,15 +2381,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2404,7 +2404,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2415,7 +2415,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
@@ -2440,15 +2440,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2456,15 +2456,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2479,15 +2479,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2495,15 +2495,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2518,15 +2518,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2535,15 +2535,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2561,15 +2561,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2578,15 +2578,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -2614,15 +2614,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2630,29 +2630,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2668,15 +2668,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2684,29 +2684,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2722,15 +2722,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2738,29 +2738,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2776,10 +2776,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2791,10 +2791,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -2822,15 +2822,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2838,29 +2838,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2876,15 +2876,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2892,29 +2892,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2930,15 +2930,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -2946,29 +2946,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -2984,15 +2984,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3000,29 +3000,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3048,10 +3048,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3063,10 +3063,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3078,10 +3078,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3093,10 +3093,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3108,10 +3108,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3139,10 +3139,10 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3154,10 +3154,10 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3195,10 +3195,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3210,10 +3210,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3225,10 +3225,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3240,10 +3240,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3255,10 +3255,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3286,10 +3286,10 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3301,10 +3301,10 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3342,22 +3342,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3371,22 +3371,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3400,22 +3400,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3429,10 +3429,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3444,10 +3444,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3475,22 +3475,22 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3504,22 +3504,22 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3533,22 +3533,22 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3562,22 +3562,22 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -3601,15 +3601,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3617,29 +3617,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3655,15 +3655,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3671,29 +3671,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3709,15 +3709,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3725,29 +3725,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3763,10 +3763,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3778,10 +3778,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
@@ -3809,15 +3809,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3825,29 +3825,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3863,15 +3863,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3879,29 +3879,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3917,15 +3917,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3933,29 +3933,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -3971,15 +3971,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -3987,29 +3987,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -4035,15 +4035,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4051,44 +4051,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4104,15 +4104,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4120,44 +4120,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4173,15 +4173,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4189,44 +4189,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4242,13 +4242,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4260,13 +4260,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4294,15 +4294,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4310,44 +4310,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4363,15 +4363,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4379,44 +4379,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4432,15 +4432,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4448,44 +4448,44 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4501,15 +4501,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -4517,44 +4517,44 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -4580,13 +4580,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4598,13 +4598,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4616,13 +4616,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4634,13 +4634,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4652,13 +4652,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4686,13 +4686,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4704,13 +4704,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4748,13 +4748,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4766,13 +4766,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4784,13 +4784,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4802,13 +4802,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4820,13 +4820,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4854,13 +4854,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4872,13 +4872,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -4916,29 +4916,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4952,29 +4952,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -4988,29 +4988,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -5024,13 +5024,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -5042,13 +5042,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -5076,29 +5076,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -5112,29 +5112,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -5148,29 +5148,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -5184,29 +5184,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -5230,15 +5230,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5246,44 +5246,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5299,15 +5299,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5315,44 +5315,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5368,15 +5368,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5384,44 +5384,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5437,13 +5437,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -5455,13 +5455,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -5489,15 +5489,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5505,44 +5505,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5558,15 +5558,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5574,44 +5574,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5627,15 +5627,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5643,44 +5643,44 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5696,15 +5696,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -5712,44 +5712,44 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -5775,7 +5775,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5786,7 +5786,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5797,7 +5797,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5808,7 +5808,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5819,7 +5819,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5844,7 +5844,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5855,7 +5855,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5890,7 +5890,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5901,7 +5901,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5912,7 +5912,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5923,7 +5923,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5934,7 +5934,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5959,7 +5959,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -5970,7 +5970,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -6005,15 +6005,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6026,15 +6026,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6047,15 +6047,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6068,7 +6068,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -6079,7 +6079,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -6104,15 +6104,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6125,15 +6125,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6146,16 +6146,16 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6172,16 +6172,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -6208,15 +6208,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6224,15 +6224,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6247,15 +6247,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6263,15 +6263,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6286,15 +6286,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6302,15 +6302,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6325,7 +6325,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -6336,7 +6336,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p1)
@@ -6361,15 +6361,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6377,15 +6377,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6400,15 +6400,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6416,15 +6416,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6439,15 +6439,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6456,15 +6456,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6483,15 +6483,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6500,15 +6500,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -6624,7 +6624,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6638,7 +6638,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6652,7 +6652,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6694,7 +6694,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6708,7 +6708,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6722,7 +6722,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6736,7 +6736,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -6760,15 +6760,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6784,15 +6784,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6808,15 +6808,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6860,15 +6860,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6884,15 +6884,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6908,15 +6908,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -6932,15 +6932,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7053,7 +7053,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7067,7 +7067,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7081,7 +7081,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7123,7 +7123,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7137,7 +7137,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7151,7 +7151,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7165,7 +7165,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7189,15 +7189,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7213,15 +7213,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7237,15 +7237,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7289,15 +7289,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7313,15 +7313,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7337,15 +7337,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7361,15 +7361,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -7484,7 +7484,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7499,7 +7499,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7514,7 +7514,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7569,7 +7569,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7584,7 +7584,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7599,7 +7599,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7614,7 +7614,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -7986,13 +7986,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8028,13 +8028,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8070,13 +8070,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8110,7 +8110,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -8145,7 +8145,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -8180,7 +8180,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -8215,7 +8215,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -8250,13 +8250,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8290,13 +8290,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8330,13 +8330,13 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8370,13 +8370,13 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C3]](s32)
@@ -8510,7 +8510,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8521,7 +8521,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8532,7 +8532,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8543,7 +8543,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8554,7 +8554,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8579,7 +8579,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8590,7 +8590,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -8626,15 +8626,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8642,44 +8642,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8694,15 +8694,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8710,44 +8710,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8762,15 +8762,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8778,44 +8778,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8830,13 +8830,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8847,13 +8847,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -8878,15 +8878,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8894,44 +8894,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -8946,15 +8946,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -8962,44 +8962,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -9014,15 +9014,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9030,44 +9030,44 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -9082,15 +9082,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -9098,44 +9098,44 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -9248,7 +9248,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -9265,7 +9265,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -9282,7 +9282,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -9328,7 +9328,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9341,7 +9341,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9354,7 +9354,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9367,7 +9367,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -9389,15 +9389,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9416,15 +9416,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9443,15 +9443,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9498,16 +9498,16 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9521,16 +9521,16 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9544,16 +9544,16 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9567,16 +9567,16 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -9600,7 +9600,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -9633,7 +9633,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -9666,7 +9666,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -9699,7 +9699,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -9727,7 +9727,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -9799,7 +9799,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -9827,7 +9827,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -9911,10 +9911,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; SI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9946,10 +9946,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -9981,10 +9981,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; VI-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
@@ -10016,7 +10016,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -10044,7 +10044,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -10073,11 +10073,11 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10102,11 +10102,11 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10131,11 +10131,11 @@ body: |
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10160,11 +10160,11 @@ body: |
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10189,11 +10189,11 @@ body: |
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10218,11 +10218,11 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10258,22 +10258,22 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10307,22 +10307,22 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10356,22 +10356,22 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10405,7 +10405,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -10433,7 +10433,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LOAD]](<2 x s16>)
@@ -10462,11 +10462,11 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, align 1, addrspace 5)
; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, align 1, addrspace 5)
; GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10491,11 +10491,11 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), align 1, addrspace 5)
; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, align 1, addrspace 5)
; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, align 1, addrspace 5)
; GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -10519,24 +10519,24 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10562,24 +10562,24 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10605,24 +10605,24 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10648,24 +10648,24 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
@@ -10702,7 +10702,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10713,7 +10713,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10724,7 +10724,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10735,7 +10735,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10746,7 +10746,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10771,7 +10771,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10782,7 +10782,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10817,7 +10817,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10828,7 +10828,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10839,7 +10839,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10850,7 +10850,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10861,7 +10861,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10886,7 +10886,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10897,7 +10897,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -10931,7 +10931,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -10941,9 +10941,9 @@ body: |
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LOAD2]], [[C1]]
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LOAD3]], [[C1]]
@@ -10959,7 +10959,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -10969,9 +10969,9 @@ body: |
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LOAD2]], [[C1]]
; CI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LOAD3]], [[C1]]
@@ -10987,7 +10987,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; VI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
@@ -10997,9 +10997,9 @@ body: |
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LOAD2]], [[C1]]
; VI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LOAD3]], [[C1]]
@@ -11015,7 +11015,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -11026,7 +11026,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -11052,15 +11052,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
@@ -11074,15 +11074,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
@@ -11096,15 +11096,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -11119,15 +11119,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32)
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -11151,15 +11151,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11171,15 +11171,15 @@ body: |
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL2]]
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -11197,15 +11197,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11217,15 +11217,15 @@ body: |
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL2]]
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -11243,15 +11243,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11263,15 +11263,15 @@ body: |
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL2]]
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD2]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD3]]
@@ -11289,7 +11289,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -11300,7 +11300,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
@@ -11325,32 +11325,32 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -11365,32 +11365,32 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -11405,32 +11405,32 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -11446,32 +11446,32 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[OR]](s32)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[OR1]](s32)
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -11497,7 +11497,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11508,7 +11508,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11519,7 +11519,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11530,7 +11530,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11541,7 +11541,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11566,7 +11566,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11577,7 +11577,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11612,7 +11612,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11623,7 +11623,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11634,7 +11634,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11645,7 +11645,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11656,7 +11656,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11681,7 +11681,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11692,7 +11692,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11727,15 +11727,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11748,15 +11748,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11769,15 +11769,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11790,7 +11790,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11801,7 +11801,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -11826,15 +11826,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11847,15 +11847,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11868,15 +11868,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11889,15 +11889,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
@@ -11920,15 +11920,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11936,15 +11936,15 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11959,15 +11959,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -11975,15 +11975,15 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -11998,15 +11998,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12014,15 +12014,15 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12037,7 +12037,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -12048,7 +12048,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
@@ -12073,15 +12073,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12089,15 +12089,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12112,15 +12112,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12128,15 +12128,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12151,15 +12151,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12167,15 +12167,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12190,15 +12190,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12206,15 +12206,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -12239,15 +12239,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12255,29 +12255,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12292,15 +12292,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12308,29 +12308,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12345,15 +12345,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12361,29 +12361,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12398,10 +12398,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12412,10 +12412,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12440,15 +12440,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12456,29 +12456,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12493,15 +12493,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12509,29 +12509,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12546,15 +12546,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12562,29 +12562,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12599,15 +12599,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12615,29 +12615,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -12662,10 +12662,10 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12676,10 +12676,10 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12690,10 +12690,10 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12704,10 +12704,10 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12718,10 +12718,10 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12746,10 +12746,10 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12760,10 +12760,10 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
@@ -12798,15 +12798,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12814,44 +12814,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12866,15 +12866,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12882,44 +12882,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -12934,15 +12934,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -12950,44 +12950,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13002,13 +13002,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13019,13 +13019,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13050,15 +13050,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13066,44 +13066,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13118,15 +13118,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13134,44 +13134,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13186,15 +13186,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13202,44 +13202,44 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13254,15 +13254,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13270,44 +13270,44 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -13332,13 +13332,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13349,13 +13349,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13366,13 +13366,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13383,13 +13383,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13400,13 +13400,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13431,13 +13431,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13448,13 +13448,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13489,13 +13489,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13506,13 +13506,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13523,13 +13523,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13540,13 +13540,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13557,13 +13557,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13588,13 +13588,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13605,13 +13605,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13646,29 +13646,29 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13681,29 +13681,29 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13716,29 +13716,29 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13751,13 +13751,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13768,13 +13768,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -13799,29 +13799,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13834,29 +13834,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13869,29 +13869,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13904,29 +13904,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
@@ -13949,15 +13949,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -13965,44 +13965,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14017,15 +14017,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14033,44 +14033,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14085,15 +14085,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14101,44 +14101,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14153,13 +14153,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -14170,13 +14170,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
@@ -14201,15 +14201,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14217,44 +14217,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14269,15 +14269,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14285,44 +14285,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14337,15 +14337,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14353,44 +14353,44 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14405,15 +14405,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -14421,44 +14421,44 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -14483,25 +14483,25 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; SI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14512,25 +14512,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; CI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14541,25 +14541,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; VI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14570,25 +14570,25 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14599,25 +14599,25 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14628,7 +14628,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -14639,7 +14639,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -14650,25 +14650,25 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14679,25 +14679,25 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
@@ -14708,7 +14708,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -14719,7 +14719,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
@@ -14740,49 +14740,49 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; SI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; SI-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; SI-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; SI-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; SI-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; SI-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; SI-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; SI-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; SI-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; SI-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; SI-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; SI-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; SI-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; SI-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; SI-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; SI-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; SI-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; SI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -14793,49 +14793,49 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; CI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; CI-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; CI-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; CI-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; CI-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; CI-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; CI-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; CI-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; CI-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; CI-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; CI-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; CI-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; CI-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; CI-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; CI-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; CI-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; CI-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; CI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -14846,49 +14846,49 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; VI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; VI-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; VI-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; VI-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; VI-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; VI-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; VI-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; VI-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; VI-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; VI-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; VI-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; VI-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; VI-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; VI-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; VI-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; VI-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; VI-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; VI-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -14899,49 +14899,49 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX9-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; GFX9-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; GFX9-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; GFX9-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; GFX9-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; GFX9-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; GFX9-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; GFX9-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; GFX9-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; GFX9-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; GFX9-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; GFX9-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; GFX9-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; GFX9-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; GFX9-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; GFX9-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -14952,49 +14952,49 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; GFX10-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; GFX10-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; GFX10-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; GFX10-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; GFX10-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; GFX10-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; GFX10-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; GFX10-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; GFX10-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; GFX10-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; GFX10-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -15005,13 +15005,13 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p5) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 5)
; GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p5) :: (load (<4 x s32>) from unknown-address + 48, addrspace 5)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -15022,13 +15022,13 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p5) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 5)
; GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p5) :: (load (<4 x s32>) from unknown-address + 48, addrspace 5)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -15039,49 +15039,49 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; UNALIGNED_GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -15092,49 +15092,49 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load (s32) from unknown-address + 32, align 32, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s32) from unknown-address + 36, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load (s32) from unknown-address + 40, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C10]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s32) from unknown-address + 44, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C11]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load (s32) from unknown-address + 48, align 16, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C12]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load (s32) from unknown-address + 52, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C13]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load (s32) from unknown-address + 56, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C14]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s32) from unknown-address + 60, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
; UNALIGNED_GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
@@ -15145,13 +15145,13 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p5) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p5) :: (load (<4 x s32>) from unknown-address + 48, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; UNALIGNED_GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -15162,13 +15162,13 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 32, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p5) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p5) :: (load (<4 x s32>) from unknown-address + 48, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
@@ -15189,13 +15189,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15207,13 +15207,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15225,13 +15225,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15243,13 +15243,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15261,13 +15261,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15293,13 +15293,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15311,13 +15311,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15353,15 +15353,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15369,44 +15369,44 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; SI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; SI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -15422,15 +15422,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15438,44 +15438,44 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; CI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; CI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -15491,15 +15491,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15507,44 +15507,44 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; VI-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; VI-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -15560,13 +15560,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15578,13 +15578,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
@@ -15610,15 +15610,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15626,44 +15626,44 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX9-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -15679,15 +15679,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15695,44 +15695,44 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
; UNALIGNED_GFX10-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[OR7]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[OR6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
@@ -15748,15 +15748,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15765,15 +15765,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -15783,30 +15783,30 @@ body: |
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX11-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -15825,15 +15825,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -15842,15 +15842,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -15860,30 +15860,30 @@ body: |
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[ZEXT]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[SHL8]], [[ZEXTLOAD8]]
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[OR8]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[OR7]]
; UNALIGNED_GFX12-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR9]](s32)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[ZEXTLOAD11]]
@@ -15912,19 +15912,19 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; SI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -15938,19 +15938,19 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -15964,19 +15964,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -15990,19 +15990,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16016,19 +16016,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16042,7 +16042,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p5) :: (load (s64) from unknown-address + 16, align 16, addrspace 5)
; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16056,7 +16056,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p5) :: (load (s64) from unknown-address + 16, align 16, addrspace 5)
; GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16070,19 +16070,19 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16096,19 +16096,19 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16122,7 +16122,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p5) :: (load (s64) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16136,7 +16136,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p5) :: (load (s64) from unknown-address + 16, align 16, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -16162,25 +16162,25 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; SI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16192,25 +16192,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; CI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16222,25 +16222,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; VI-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16252,25 +16252,25 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16282,25 +16282,25 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX10-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16312,7 +16312,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s64>) from unknown-address + 16, addrspace 5)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -16323,7 +16323,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s64>) from unknown-address + 16, addrspace 5)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -16334,25 +16334,25 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16364,25 +16364,25 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 32, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
@@ -16394,7 +16394,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s64>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -16405,7 +16405,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p5) :: (load (<2 x s64>), align 32, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s64>) from unknown-address + 16, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; UNALIGNED_GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
@@ -16426,13 +16426,13 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16444,13 +16444,13 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16462,13 +16462,13 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16480,13 +16480,13 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16498,13 +16498,13 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16532,13 +16532,13 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16550,13 +16550,13 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
@@ -16594,25 +16594,25 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; SI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; SI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16624,25 +16624,25 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; CI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; CI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16654,25 +16654,25 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; VI-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; VI-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16684,25 +16684,25 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16714,25 +16714,25 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16744,7 +16744,7 @@ body: |
; GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 8, addrspace 5)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, align 8, addrspace 5)
; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -16756,7 +16756,7 @@ body: |
; GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 8, addrspace 5)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, align 8, addrspace 5)
; GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -16768,25 +16768,25 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16798,25 +16798,25 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load (s32) from unknown-address + 24, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s32) from unknown-address + 28, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>)
@@ -16828,7 +16828,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 8, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, align 8, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -16840,7 +16840,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p5) :: (load (<4 x s32>), align 8, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<4 x s32>) from unknown-address + 16, align 8, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
@@ -16862,7 +16862,7 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16874,7 +16874,7 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16886,7 +16886,7 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16898,7 +16898,7 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16910,7 +16910,7 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16938,7 +16938,7 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -16950,7 +16950,7 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p3>) = G_BITCAST [[BUILD_VECTOR]](<2 x s32>)
@@ -18219,15 +18219,15 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18235,29 +18235,29 @@ body: |
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; SI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; SI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; SI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18266,43 +18266,43 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; SI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; SI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; SI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; SI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; SI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; SI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; SI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; SI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; SI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; SI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; SI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; SI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; SI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; SI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; SI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; SI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18321,15 +18321,15 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18337,29 +18337,29 @@ body: |
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; CI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; CI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; CI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18368,43 +18368,43 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; CI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; CI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; CI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; CI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; CI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; CI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; CI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; CI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; CI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; CI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; CI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; CI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; CI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; CI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; CI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; CI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; CI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18423,15 +18423,15 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18439,29 +18439,29 @@ body: |
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; VI-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; VI-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; VI-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18470,43 +18470,43 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; VI-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; VI-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; VI-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; VI-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; VI-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; VI-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; VI-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; VI-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; VI-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; VI-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; VI-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; VI-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; VI-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; VI-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; VI-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; VI-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18525,19 +18525,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 1, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, align 1, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -18552,19 +18552,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 1, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 1, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 1, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 1, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, align 1, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -18580,7 +18580,7 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 1, addrspace 5)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 5)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -18595,7 +18595,7 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 1, addrspace 5)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 1, addrspace 5)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -18609,15 +18609,15 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18625,29 +18625,29 @@ body: |
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18656,43 +18656,43 @@ body: |
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX9-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; UNALIGNED_GFX9-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; UNALIGNED_GFX9-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18711,15 +18711,15 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18727,29 +18727,29 @@ body: |
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18758,43 +18758,43 @@ body: |
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; UNALIGNED_GFX10-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; UNALIGNED_GFX10-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18813,15 +18813,15 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18829,29 +18829,29 @@ body: |
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18860,43 +18860,43 @@ body: |
; UNALIGNED_GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; UNALIGNED_GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; UNALIGNED_GFX11-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; UNALIGNED_GFX11-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -18915,15 +18915,15 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s8), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p5) :: (load (s8) from unknown-address + 1, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s8) from unknown-address + 2, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s8) from unknown-address + 3, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -18931,29 +18931,29 @@ body: |
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s8) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p5) :: (load (s8) from unknown-address + 5, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s8) from unknown-address + 6, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s8) from unknown-address + 7, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[OR4]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[OR3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s8) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD8]](p5) :: (load (s8) from unknown-address + 9, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD7]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[SHL6]], [[ZEXTLOAD6]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s8) from unknown-address + 10, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s8) from unknown-address + 11, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[SHL7]], [[ZEXTLOAD8]]
@@ -18962,43 +18962,43 @@ body: |
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD11]](p5) :: (load (s8) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD12]](p5) :: (load (s8) from unknown-address + 13, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD10]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[SHL9]], [[ZEXTLOAD9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD13]](p5) :: (load (s8) from unknown-address + 14, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load (s8) from unknown-address + 15, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[SHL10]], [[ZEXTLOAD11]]
; UNALIGNED_GFX12-NEXT: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[OR10]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[SHL11]], [[OR9]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD15]](p5) :: (load (s8) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD16]](p5) :: (load (s8) from unknown-address + 17, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD13]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[SHL12]], [[ZEXTLOAD12]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD14:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD17]](p5) :: (load (s8) from unknown-address + 18, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD17]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load (s8) from unknown-address + 19, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[SHL13]], [[ZEXTLOAD14]]
; UNALIGNED_GFX12-NEXT: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[OR13]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[SHL14]], [[OR12]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD15:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD19]](p5) :: (load (s8) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD16:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD20]](p5) :: (load (s8) from unknown-address + 21, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD16]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR15:%[0-9]+]]:_(s32) = G_OR [[SHL15]], [[ZEXTLOAD15]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD17:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD21]](p5) :: (load (s8) from unknown-address + 22, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD21]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load (s8) from unknown-address + 23, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR16:%[0-9]+]]:_(s32) = G_OR [[SHL16]], [[ZEXTLOAD17]]
@@ -19030,43 +19030,43 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; SI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; SI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; SI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; SI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; SI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; SI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; SI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; SI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; SI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; SI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19083,43 +19083,43 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; CI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; CI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; CI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; CI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; CI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; CI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; CI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; CI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; CI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; CI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; CI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; CI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; CI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19136,43 +19136,43 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; VI-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; VI-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; VI-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; VI-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; VI-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; VI-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; VI-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; VI-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; VI-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; VI-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19189,19 +19189,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 2, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, align 2, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19216,19 +19216,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 2, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, align 2, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, align 2, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, align 2, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, align 2, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19244,7 +19244,7 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 2, addrspace 5)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 5)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19259,7 +19259,7 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 2, addrspace 5)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 2, addrspace 5)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19273,43 +19273,43 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; UNALIGNED_GFX9-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19326,43 +19326,43 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; UNALIGNED_GFX10-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19379,43 +19379,43 @@ body: |
; UNALIGNED_GFX11-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX11-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX11-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX11-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; UNALIGNED_GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; UNALIGNED_GFX11-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX11-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19432,43 +19432,43 @@ body: |
; UNALIGNED_GFX12-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p5) :: (load (s16), addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 2, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNALIGNED_GFX12-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; UNALIGNED_GFX12-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p5) :: (load (s16) from unknown-address + 4, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s16) from unknown-address + 6, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD1]]
; UNALIGNED_GFX12-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p5) :: (load (s16) from unknown-address + 8, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s16) from unknown-address + 10, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LOAD2]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[ZEXTLOAD2]]
; UNALIGNED_GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32), [[OR2]](s32)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p5) :: (load (s16) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load (s16) from unknown-address + 14, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[LOAD3]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD7]](p5) :: (load (s16) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load (s16) from unknown-address + 18, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD4]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD4]]
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s32)
; UNALIGNED_GFX12-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD9]](p5) :: (load (s16) from unknown-address + 20, addrspace 5)
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load (s16) from unknown-address + 22, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[LOAD5]], [[C1]](s32)
; UNALIGNED_GFX12-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[SHL5]], [[ZEXTLOAD5]]
@@ -19498,19 +19498,19 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; SI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19525,19 +19525,19 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19552,19 +19552,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19579,19 +19579,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19606,19 +19606,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19634,7 +19634,7 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 4, addrspace 5)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19649,7 +19649,7 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 4, addrspace 5)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19663,19 +19663,19 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19690,19 +19690,19 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19718,7 +19718,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 4, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19733,7 +19733,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 4, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19760,19 +19760,19 @@ body: |
; SI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; SI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; SI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; SI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; SI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; SI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; SI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; SI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19787,19 +19787,19 @@ body: |
; CI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; CI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; CI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; CI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; CI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; CI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; CI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; CI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19814,19 +19814,19 @@ body: |
; VI-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; VI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; VI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; VI-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; VI-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; VI-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; VI-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; VI-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; VI-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19841,19 +19841,19 @@ body: |
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19868,19 +19868,19 @@ body: |
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19896,7 +19896,7 @@ body: |
; GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 16, addrspace 5)
; GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19911,7 +19911,7 @@ body: |
; GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 16, addrspace 5)
; GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19925,19 +19925,19 @@ body: |
; UNALIGNED_GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX9-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; UNALIGNED_GFX9-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; UNALIGNED_GFX9-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19952,19 +19952,19 @@ body: |
; UNALIGNED_GFX10-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; UNALIGNED_GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load (s32), align 16, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load (s32) from unknown-address + 8, align 8, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; UNALIGNED_GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load (s32) from unknown-address + 12, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load (s32) from unknown-address + 16, addrspace 5)
- ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
+ ; UNALIGNED_GFX10-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C1]](s32)
; UNALIGNED_GFX10-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load (s32) from unknown-address + 20, addrspace 5)
; UNALIGNED_GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32)
; UNALIGNED_GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>)
@@ -19980,7 +19980,7 @@ body: |
; UNALIGNED_GFX11-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 16, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX11-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX11-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; UNALIGNED_GFX11-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; UNALIGNED_GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
@@ -19995,7 +19995,7 @@ body: |
; UNALIGNED_GFX12-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p5) :: (load (<3 x s32>), align 16, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED_GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:_(p5) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED_GFX12-NEXT: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p5) :: (load (<3 x s32>) from unknown-address + 12, align 4, addrspace 5)
; UNALIGNED_GFX12-NEXT: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>)
; UNALIGNED_GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir
index 181cd13..477239a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir
@@ -27,6 +27,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 1
; GFX8-NEXT: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -52,6 +53,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 7
; GFX8-NEXT: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i7
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -75,19 +77,20 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX8-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX8-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[SEXTLOAD]], [[C1]](s32)
; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX8-NEXT: $vgpr0 = COPY [[OR]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i24
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[SEXTLOAD]], [[C1]](s32)
@@ -111,6 +114,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 30
; GFX8-NEXT: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i30
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -136,6 +140,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 31
; GFX8-NEXT: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i31
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -160,6 +165,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -183,6 +189,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i32_i16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -205,6 +212,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_i31_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -229,6 +237,7 @@ body: |
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
+ ;
; GFX6-LABEL: name: test_sextload_global_i64_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -253,6 +262,7 @@ body: |
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX8-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
+ ;
; GFX6-LABEL: name: test_sextload_global_i64_i16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -277,6 +287,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
+ ;
; GFX6-LABEL: name: test_sextload_global_i64_i32
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -301,13 +312,14 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[SEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_sextload_global_s32_from_2_align1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[SEXTLOAD]], [[C1]](s32)
@@ -331,13 +343,14 @@ body: |
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX8-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
+ ;
; GFX6-LABEL: name: test_sextload_global_s64_from_2_align1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[SEXTLOAD]], [[C1]](s32)
@@ -361,6 +374,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(<2 x s16>) = G_SEXTLOAD [[COPY]](p1) :: (load (<2 x s8>), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[SEXTLOAD]](<2 x s16>)
+ ;
; GFX6-LABEL: name: test_sextload_global_v2i16_from_v2s8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -384,6 +398,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(<2 x s32>) = G_SEXTLOAD [[COPY]](p1) :: (load (<2 x s8>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXTLOAD]](<2 x s32>)
+ ;
; GFX6-LABEL: name: test_sextload_global_v2i32_from_v2s8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -407,6 +422,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(<2 x s32>) = G_SEXTLOAD [[COPY]](p1) :: (load (<2 x s16>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SEXTLOAD]](<2 x s32>)
+ ;
; GFX6-LABEL: name: test_sextload_global_v2i32_from_v2s16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -430,6 +446,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(<2 x s64>) = G_SEXTLOAD [[COPY]](p1) :: (load (<2 x s16>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[SEXTLOAD]](<2 x s64>)
+ ;
; GFX6-LABEL: name: test_sextload_global_v2i64_from_v2s16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -453,6 +470,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(<2 x s64>) = G_SEXTLOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[SEXTLOAD]](<2 x s64>)
+ ;
; GFX6-LABEL: name: test_sextload_global_v2i64_from_v2s32
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -476,6 +494,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s128) = G_SEXTLOAD [[COPY]](p1) :: (load (s64), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[SEXTLOAD]](s128)
+ ;
; GFX6-LABEL: name: test_sextload_global_s128_8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
index 1080b7dc..2b84c6b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
@@ -160,7 +160,7 @@ body: |
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
;
@@ -180,7 +180,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C]](s16)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
@@ -290,7 +290,7 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -303,7 +303,7 @@ body: |
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; CI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -316,7 +316,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -329,7 +329,7 @@ body: |
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; GFX9-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -353,7 +353,7 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -366,7 +366,7 @@ body: |
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; CI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -379,7 +379,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -392,7 +392,7 @@ body: |
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; GFX9-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -416,13 +416,13 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
@@ -436,7 +436,7 @@ body: |
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 1, addrspace 1)
; CI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
;
@@ -449,12 +449,12 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
@@ -469,7 +469,7 @@ body: |
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 1, addrspace 1)
; GFX9-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -564,18 +564,18 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
;
@@ -595,18 +595,18 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
@@ -637,7 +637,7 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -657,7 +657,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -726,18 +726,18 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
;
@@ -758,18 +758,18 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
@@ -801,7 +801,7 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -822,7 +822,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -890,30 +890,30 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C5]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C4]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[LSHR1]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C5]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
;
@@ -926,7 +926,7 @@ body: |
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), align 1, addrspace 1)
; CI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; CI-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, align 1, addrspace 1)
@@ -940,30 +940,30 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; VI-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C4]](s16)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; VI-NEXT: G_STORE [[TRUNC3]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
@@ -977,7 +977,7 @@ body: |
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), align 1, addrspace 1)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; GFX9-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, align 1, addrspace 1)
@@ -1002,12 +1002,12 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
@@ -1022,7 +1022,7 @@ body: |
; CI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), align 2, addrspace 1)
; CI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; CI-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
@@ -1036,12 +1036,12 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
@@ -1056,7 +1056,7 @@ body: |
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), align 2, addrspace 1)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; GFX9-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
@@ -1106,39 +1106,39 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C5]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C4]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[LSHR1]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C5]]
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[LSHR4]], [[COPY7]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -1158,40 +1158,40 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C4]](s16)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR6]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -1222,19 +1222,19 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -1254,19 +1254,19 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -1411,39 +1411,39 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C5]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C4]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[LSHR1]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C5]]
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[LSHR4]], [[COPY7]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -1464,40 +1464,40 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C4]](s16)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR6]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -1529,19 +1529,19 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -1562,19 +1562,19 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -1719,39 +1719,39 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C5]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C4]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[LSHR1]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C5]]
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[LSHR4]], [[COPY7]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -1772,40 +1772,40 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C4]](s16)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR6]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -1837,19 +1837,19 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -1870,19 +1870,19 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -2027,34 +2027,34 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -2075,35 +2075,35 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -2135,14 +2135,14 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -2163,14 +2163,14 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -2316,34 +2316,34 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -2366,35 +2366,35 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -2428,14 +2428,14 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -2458,14 +2458,14 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
@@ -2629,31 +2629,31 @@ body: |
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C2]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C2]]
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY2]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C3]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[BITCAST1]], [[C2]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; SI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C2]]
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C3]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
;
@@ -2682,28 +2682,28 @@ body: |
; VI-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C2]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C1]](s16)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; VI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C1]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C2]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
@@ -2738,13 +2738,13 @@ body: |
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; SI-NEXT: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
; CI-LABEL: name: test_store_global_v4s16_align2
@@ -2767,13 +2767,13 @@ body: |
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; VI-NEXT: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v4s16_align2
@@ -2917,50 +2917,50 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
;
@@ -2981,52 +2981,52 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
@@ -3058,21 +3058,21 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
;
@@ -3093,21 +3093,21 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
;
@@ -3137,7 +3137,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v3s32_align4
@@ -3180,7 +3180,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v3s32_align8
@@ -3223,7 +3223,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 16, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v3s32_align16
@@ -3267,66 +3267,66 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
;
@@ -3347,69 +3347,69 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
@@ -3441,28 +3441,28 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -3483,28 +3483,28 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -3649,75 +3649,75 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C5]]
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C4]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[LSHR1]], [[COPY4]](s32)
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C5]]
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[LSHR4]], [[COPY7]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY [[UV1]](s64)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[COPY8]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY8]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[TRUNC2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C5]]
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[LSHR8]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR7]](s64)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C5]]
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C4]](s32)
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[LSHR11]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
;
@@ -3738,78 +3738,78 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C4]](s16)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR6]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[UV1]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[COPY5]], [[COPY6]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[TRUNC6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s64)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR9]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR8]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR7]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[TRUNC9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR7]](s64)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC10]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C5]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR12]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR11]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC11]], [[C4]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C5]](s64)
; VI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
@@ -3841,37 +3841,37 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[UV1]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[COPY5]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[TRUNC2]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR3]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C2]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; SI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -3892,37 +3892,37 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[UV1]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[COPY5]], [[COPY6]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[TRUNC2]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR3]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[TRUNC3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C2]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -4068,66 +4068,66 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
;
@@ -4150,69 +4150,69 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
@@ -4246,28 +4246,28 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -4290,28 +4290,28 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -4470,66 +4470,66 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
;
@@ -4552,69 +4552,69 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
@@ -4648,28 +4648,28 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -4692,28 +4692,28 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -4872,50 +4872,50 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
;
@@ -4938,52 +4938,52 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
@@ -5017,21 +5017,21 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
;
@@ -5054,21 +5054,21 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
;
@@ -5100,7 +5100,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_s96_align4
@@ -5147,7 +5147,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_s96_align8
@@ -5194,7 +5194,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 16, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_s96_align16
@@ -5242,66 +5242,66 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
;
@@ -5324,69 +5324,69 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
@@ -5420,28 +5420,28 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -5464,28 +5464,28 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
;
@@ -5643,82 +5643,82 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
;
@@ -5731,7 +5731,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5s32_align1
@@ -5744,86 +5744,86 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
@@ -5837,7 +5837,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -5860,35 +5860,35 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -5901,7 +5901,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5s32_align2
@@ -5914,35 +5914,35 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -5955,7 +5955,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -5977,7 +5977,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5s32_align4
@@ -5989,7 +5989,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5s32_align4
@@ -6001,7 +6001,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5s32_align4
@@ -6013,7 +6013,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6035,7 +6035,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5s32_align8
@@ -6047,7 +6047,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5s32_align8
@@ -6059,7 +6059,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5s32_align8
@@ -6071,7 +6071,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6093,7 +6093,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5s32_align16
@@ -6105,7 +6105,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5s32_align16
@@ -6117,7 +6117,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5s32_align16
@@ -6129,7 +6129,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6152,82 +6152,82 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
;
@@ -6241,7 +6241,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5p3_align1
@@ -6255,86 +6255,86 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
@@ -6349,7 +6349,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6373,35 +6373,35 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -6415,7 +6415,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5p3_align2
@@ -6429,35 +6429,35 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -6471,7 +6471,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6494,7 +6494,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5p3_align4
@@ -6507,7 +6507,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5p3_align4
@@ -6520,7 +6520,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5p3_align4
@@ -6533,7 +6533,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6556,7 +6556,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5p3_align8
@@ -6569,7 +6569,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5p3_align8
@@ -6582,7 +6582,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5p3_align8
@@ -6595,7 +6595,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6618,7 +6618,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v5p3_align16
@@ -6631,7 +6631,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v5p3_align16
@@ -6644,7 +6644,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v5p3_align16
@@ -6657,7 +6657,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -6680,7 +6680,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v10s16_align4
@@ -6693,7 +6693,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v10s16_align4
@@ -6706,7 +6706,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v10s16_align4
@@ -6719,7 +6719,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<10 x s16>) = G_IMPLICIT_DEF
@@ -6746,13 +6746,13 @@ body: |
; SI-NEXT: [[BITCAST2:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s16>)
; SI-NEXT: G_STORE [[BITCAST2]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[BITCAST]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 16, align 16, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; SI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 20, align 4, addrspace 1)
;
; CI-LABEL: name: test_store_global_v11s16_align4
@@ -6769,13 +6769,13 @@ body: |
; CI-NEXT: [[BITCAST2:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s16>)
; CI-NEXT: G_STORE [[BITCAST2]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[BITCAST]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 16, align 16, addrspace 1)
; CI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; CI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
; CI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; CI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; CI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 20, align 4, addrspace 1)
;
; VI-LABEL: name: test_store_global_v11s16_align4
@@ -6792,13 +6792,13 @@ body: |
; VI-NEXT: [[BITCAST2:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s16>)
; VI-NEXT: G_STORE [[BITCAST2]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[BITCAST]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 16, align 16, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 20, align 4, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v11s16_align4
@@ -6815,13 +6815,13 @@ body: |
; GFX9-NEXT: [[BITCAST2:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s16>)
; GFX9-NEXT: G_STORE [[BITCAST2]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[BITCAST]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 16, align 16, addrspace 1)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64)
; GFX9-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; GFX9-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; GFX9-NEXT: G_STORE [[BITCAST1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 20, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<11 x s16>) = G_IMPLICIT_DEF
@@ -6844,7 +6844,7 @@ body: |
; SI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[UV]](<2 x s32>), [[UV1]](<2 x s32>)
; SI-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](<2 x s32>), [[PTR_ADD]](p1) :: (store (<2 x s32>) into unknown-address + 16, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v12s16_align4
@@ -6857,7 +6857,7 @@ body: |
; CI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[UV]](<2 x s32>), [[UV1]](<2 x s32>)
; CI-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV2]](<2 x s32>), [[PTR_ADD]](p1) :: (store (<2 x s32>) into unknown-address + 16, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v12s16_align4
@@ -6870,7 +6870,7 @@ body: |
; VI-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[UV]](<2 x s32>), [[UV1]](<2 x s32>)
; VI-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV2]](<2 x s32>), [[PTR_ADD]](p1) :: (store (<2 x s32>) into unknown-address + 16, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v12s16_align4
@@ -6883,7 +6883,7 @@ body: |
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[UV]](<2 x s32>), [[UV1]](<2 x s32>)
; GFX9-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV2]](<2 x s32>), [[PTR_ADD]](p1) :: (store (<2 x s32>) into unknown-address + 16, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<12 x s16>) = G_IMPLICIT_DEF
@@ -6907,82 +6907,82 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
;
@@ -6996,7 +6996,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_s160_align1
@@ -7010,86 +7010,86 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
@@ -7104,7 +7104,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -7128,35 +7128,35 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -7170,7 +7170,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_s160_align2
@@ -7184,35 +7184,35 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
;
@@ -7226,7 +7226,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -7249,7 +7249,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_s160_align4
@@ -7262,7 +7262,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_s160_align4
@@ -7275,7 +7275,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s160_align4
@@ -7288,7 +7288,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -7311,7 +7311,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_s160_align8
@@ -7324,7 +7324,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_s160_align8
@@ -7337,7 +7337,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s160_align8
@@ -7350,7 +7350,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -7373,7 +7373,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_s160_align16
@@ -7386,7 +7386,7 @@ body: |
; CI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_s160_align16
@@ -7399,7 +7399,7 @@ body: |
; VI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s160_align16
@@ -7412,7 +7412,7 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV4]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 16, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
@@ -7435,128 +7435,128 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; SI-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; SI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY16]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; SI-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY16]], [[C3]]
; SI-NEXT: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[COPY17]](s32)
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY16]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR16]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; SI-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[LSHR15]], [[COPY18]](s32)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; SI-NEXT: G_STORE [[LSHR17]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; SI-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; SI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY19]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; SI-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY19]], [[C3]]
; SI-NEXT: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[COPY20]](s32)
- ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY19]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR19]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; SI-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[LSHR18]], [[COPY21]](s32)
- ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; SI-NEXT: G_STORE [[LSHR20]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
; SI-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; SI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY22]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; SI-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]]
; SI-NEXT: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[COPY23]](s32)
- ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY22]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR22]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; SI-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[LSHR21]], [[COPY24]](s32)
- ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; SI-NEXT: G_STORE [[LSHR23]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
;
@@ -7568,7 +7568,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align1
@@ -7581,135 +7581,135 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; VI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; VI-NEXT: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; VI-NEXT: [[LSHR16:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC10]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; VI-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR16]](s16)
; VI-NEXT: G_STORE [[ANYEXT10]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; VI-NEXT: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR15]](s32)
; VI-NEXT: [[LSHR17:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC11]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; VI-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR17]](s16)
; VI-NEXT: G_STORE [[ANYEXT11]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; VI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; VI-NEXT: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; VI-NEXT: [[LSHR19:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC12]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; VI-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR19]](s16)
; VI-NEXT: G_STORE [[ANYEXT12]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; VI-NEXT: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR18]](s32)
; VI-NEXT: [[LSHR20:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC13]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; VI-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR20]](s16)
; VI-NEXT: G_STORE [[ANYEXT13]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; VI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; VI-NEXT: [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
; VI-NEXT: [[LSHR22:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC14]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; VI-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR22]](s16)
; VI-NEXT: G_STORE [[ANYEXT14]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; VI-NEXT: [[TRUNC15:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR21]](s32)
; VI-NEXT: [[LSHR23:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC15]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; VI-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR23]](s16)
; VI-NEXT: G_STORE [[ANYEXT15]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
@@ -7722,7 +7722,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -7745,54 +7745,54 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
;
@@ -7804,7 +7804,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align2
@@ -7817,54 +7817,54 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; VI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; VI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
;
@@ -7876,7 +7876,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -7897,7 +7897,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; CI-LABEL: name: test_store_global_v8s32_align4
@@ -7908,7 +7908,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align4
@@ -7919,7 +7919,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v8s32_align4
@@ -7930,7 +7930,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -7951,7 +7951,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v8s32_align8
@@ -7962,7 +7962,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align8
@@ -7973,7 +7973,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v8s32_align8
@@ -7984,7 +7984,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8005,7 +8005,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v8s32_align16
@@ -8016,7 +8016,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align16
@@ -8027,7 +8027,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v8s32_align16
@@ -8038,7 +8038,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8060,7 +8060,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v2s128_align32
@@ -8072,7 +8072,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v2s128_align32
@@ -8084,7 +8084,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v2s128_align32
@@ -8096,7 +8096,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s128>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8120,128 +8120,128 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY6]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY9]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY12]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY13]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY13]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY15]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; SI-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; SI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY16]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; SI-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY16]], [[C3]]
; SI-NEXT: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[COPY17]](s32)
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY16]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR16]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; SI-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[LSHR15]], [[COPY18]](s32)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; SI-NEXT: G_STORE [[LSHR17]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; SI-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; SI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY19]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; SI-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY19]], [[C3]]
; SI-NEXT: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[COPY20]](s32)
- ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY19]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR19]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; SI-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[LSHR18]], [[COPY21]](s32)
- ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; SI-NEXT: G_STORE [[LSHR20]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
; SI-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; SI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY22]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; SI-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]]
; SI-NEXT: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[COPY23]](s32)
- ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY22]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR22]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; SI-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[LSHR21]], [[COPY24]](s32)
- ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; SI-NEXT: G_STORE [[LSHR23]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
;
@@ -8254,7 +8254,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align1
@@ -8268,135 +8268,135 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; VI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; VI-NEXT: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; VI-NEXT: [[LSHR16:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC10]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; VI-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR16]](s16)
; VI-NEXT: G_STORE [[ANYEXT10]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; VI-NEXT: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR15]](s32)
; VI-NEXT: [[LSHR17:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC11]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; VI-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR17]](s16)
; VI-NEXT: G_STORE [[ANYEXT11]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; VI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; VI-NEXT: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; VI-NEXT: [[LSHR19:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC12]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; VI-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR19]](s16)
; VI-NEXT: G_STORE [[ANYEXT12]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; VI-NEXT: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR18]](s32)
; VI-NEXT: [[LSHR20:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC13]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; VI-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR20]](s16)
; VI-NEXT: G_STORE [[ANYEXT13]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; VI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; VI-NEXT: [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
; VI-NEXT: [[LSHR22:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC14]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; VI-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR22]](s16)
; VI-NEXT: G_STORE [[ANYEXT14]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; VI-NEXT: [[TRUNC15:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR21]](s32)
; VI-NEXT: [[LSHR23:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC15]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; VI-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR23]](s16)
; VI-NEXT: G_STORE [[ANYEXT15]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
@@ -8410,7 +8410,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8434,54 +8434,54 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
;
@@ -8494,7 +8494,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align2
@@ -8508,54 +8508,54 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV12]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV13]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; VI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV14]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV15]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; VI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
;
@@ -8568,7 +8568,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8590,7 +8590,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; CI-LABEL: name: test_store_global_s256_align4
@@ -8602,7 +8602,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align4
@@ -8614,7 +8614,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s256_align4
@@ -8626,7 +8626,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8648,7 +8648,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_s256_align8
@@ -8660,7 +8660,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align8
@@ -8672,7 +8672,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s256_align8
@@ -8684,7 +8684,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8706,7 +8706,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_s256_align16
@@ -8718,7 +8718,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align16
@@ -8730,7 +8730,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s256_align16
@@ -8742,7 +8742,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8764,7 +8764,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_s256_align32
@@ -8776,7 +8776,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_s256_align32
@@ -8788,7 +8788,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_s256_align32
@@ -8800,7 +8800,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[BITCAST]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8821,7 +8821,7 @@ body: |
; SI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; SI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v8s32_align32
@@ -8832,7 +8832,7 @@ body: |
; CI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; CI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v8s32_align32
@@ -8843,7 +8843,7 @@ body: |
; VI-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; VI-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v8s32_align32
@@ -8854,7 +8854,7 @@ body: |
; GFX9-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[COPY1]](<8 x s32>)
; GFX9-NEXT: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 32, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
@@ -8881,143 +8881,143 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY5]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C3]]
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY7]](s32)
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[LSHR3]], [[COPY8]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]]
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY10]](s32)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[LSHR6]], [[COPY11]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; SI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY12]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C3]]
; SI-NEXT: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY13]](s32)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY12]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR10]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; SI-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[LSHR9]], [[COPY14]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; SI-NEXT: G_STORE [[LSHR11]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; SI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; SI-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY15]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY15]], [[C3]]
; SI-NEXT: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY16]](s32)
- ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY15]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR13]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; SI-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[LSHR12]], [[COPY17]](s32)
- ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; SI-NEXT: G_STORE [[LSHR14]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; SI-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
; SI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY18]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; SI-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]]
; SI-NEXT: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[COPY19]](s32)
- ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY18]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR16]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; SI-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[LSHR15]], [[COPY20]](s32)
- ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; SI-NEXT: G_STORE [[LSHR17]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; SI-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
; SI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY21]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; SI-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C3]]
; SI-NEXT: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[COPY22]](s32)
- ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY21]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR19]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; SI-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[LSHR18]], [[COPY23]](s32)
- ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; SI-NEXT: G_STORE [[LSHR20]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
+ ; SI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
; SI-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
; SI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY24]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; SI-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY24]], [[C3]]
; SI-NEXT: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[COPY25]](s32)
- ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY24]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR22]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; SI-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[LSHR21]], [[COPY26]](s32)
- ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; SI-NEXT: G_STORE [[LSHR23]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
; SI-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; SI-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+ ; SI-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64)
; SI-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
; SI-NEXT: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[COPY27]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD31]], [[C1]](s64)
; SI-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY27]], [[C3]]
; SI-NEXT: [[LSHR25:%[0-9]+]]:_(s32) = G_LSHR [[AND8]], [[COPY28]](s32)
- ; SI-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD31]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY27]](s32), [[PTR_ADD31]](p1) :: (store (s8) into unknown-address + 32, addrspace 1)
; SI-NEXT: G_STORE [[LSHR25]](s32), [[PTR_ADD33]](p1) :: (store (s8) into unknown-address + 33, addrspace 1)
; SI-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR26:%[0-9]+]]:_(s32) = G_LSHR [[LSHR24]], [[COPY29]](s32)
- ; SI-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD32]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD32]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR24]](s32), [[PTR_ADD32]](p1) :: (store (s8) into unknown-address + 34, addrspace 1)
; SI-NEXT: G_STORE [[LSHR26]](s32), [[PTR_ADD34]](p1) :: (store (s8) into unknown-address + 35, addrspace 1)
;
@@ -9035,10 +9035,10 @@ body: |
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 1, addrspace 1)
;
; VI-LABEL: name: test_store_global_v9s32_align1
@@ -9055,151 +9055,151 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p1) :: (store (s8) into unknown-address + 4, addrspace 1)
; VI-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; VI-NEXT: G_STORE [[ANYEXT2]](s32), [[PTR_ADD5]](p1) :: (store (s8) into unknown-address + 5, addrspace 1)
; VI-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD4]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD4]](p1) :: (store (s8) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; VI-NEXT: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store (s8) into unknown-address + 7, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store (s8) into unknown-address + 8, addrspace 1)
; VI-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; VI-NEXT: G_STORE [[ANYEXT4]](s32), [[PTR_ADD9]](p1) :: (store (s8) into unknown-address + 9, addrspace 1)
; VI-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD8]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD8]](p1) :: (store (s8) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; VI-NEXT: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store (s8) into unknown-address + 11, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; VI-NEXT: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD11]](p1) :: (store (s8) into unknown-address + 12, addrspace 1)
; VI-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
; VI-NEXT: G_STORE [[ANYEXT6]](s32), [[PTR_ADD13]](p1) :: (store (s8) into unknown-address + 13, addrspace 1)
; VI-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
; VI-NEXT: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD12]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR9]](s32), [[PTR_ADD12]](p1) :: (store (s8) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
; VI-NEXT: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store (s8) into unknown-address + 15, addrspace 1)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; VI-NEXT: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD17:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD15]](p1) :: (store (s8) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
; VI-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD17]](p1) :: (store (s8) into unknown-address + 17, addrspace 1)
; VI-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
; VI-NEXT: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD18:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD16]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR12]](s32), [[PTR_ADD16]](p1) :: (store (s8) into unknown-address + 18, addrspace 1)
; VI-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
; VI-NEXT: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store (s8) into unknown-address + 19, addrspace 1)
- ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD19:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
; VI-NEXT: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD20:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C1]](s64)
; VI-NEXT: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
; VI-NEXT: [[LSHR16:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC10]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD21:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD19]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD19]](p1) :: (store (s8) into unknown-address + 20, addrspace 1)
; VI-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR16]](s16)
; VI-NEXT: G_STORE [[ANYEXT10]](s32), [[PTR_ADD21]](p1) :: (store (s8) into unknown-address + 21, addrspace 1)
; VI-NEXT: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR15]](s32)
; VI-NEXT: [[LSHR17:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC11]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD22:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD20]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR15]](s32), [[PTR_ADD20]](p1) :: (store (s8) into unknown-address + 22, addrspace 1)
; VI-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR17]](s16)
; VI-NEXT: G_STORE [[ANYEXT11]](s32), [[PTR_ADD22]](p1) :: (store (s8) into unknown-address + 23, addrspace 1)
- ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD23:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
; VI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
; VI-NEXT: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD24:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C1]](s64)
; VI-NEXT: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
; VI-NEXT: [[LSHR19:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC12]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD25:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD23]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD23]](p1) :: (store (s8) into unknown-address + 24, addrspace 1)
; VI-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR19]](s16)
; VI-NEXT: G_STORE [[ANYEXT12]](s32), [[PTR_ADD25]](p1) :: (store (s8) into unknown-address + 25, addrspace 1)
; VI-NEXT: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR18]](s32)
; VI-NEXT: [[LSHR20:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC13]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD26:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD24]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR18]](s32), [[PTR_ADD24]](p1) :: (store (s8) into unknown-address + 26, addrspace 1)
; VI-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR20]](s16)
; VI-NEXT: G_STORE [[ANYEXT13]](s32), [[PTR_ADD26]](p1) :: (store (s8) into unknown-address + 27, addrspace 1)
- ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD27:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
; VI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
; VI-NEXT: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY11]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD28:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C1]](s64)
; VI-NEXT: [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[COPY11]](s32)
; VI-NEXT: [[LSHR22:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC14]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD29:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD27]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY11]](s32), [[PTR_ADD27]](p1) :: (store (s8) into unknown-address + 28, addrspace 1)
; VI-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR22]](s16)
; VI-NEXT: G_STORE [[ANYEXT14]](s32), [[PTR_ADD29]](p1) :: (store (s8) into unknown-address + 29, addrspace 1)
; VI-NEXT: [[TRUNC15:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR21]](s32)
; VI-NEXT: [[LSHR23:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC15]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD30:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD28]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR21]](s32), [[PTR_ADD28]](p1) :: (store (s8) into unknown-address + 30, addrspace 1)
; VI-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR23]](s16)
; VI-NEXT: G_STORE [[ANYEXT15]](s32), [[PTR_ADD30]](p1) :: (store (s8) into unknown-address + 31, addrspace 1)
; VI-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+ ; VI-NEXT: [[PTR_ADD31:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64)
; VI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
; VI-NEXT: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[COPY12]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD32:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD31]], [[C1]](s64)
; VI-NEXT: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
; VI-NEXT: [[LSHR25:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC16]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD33:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD31]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY12]](s32), [[PTR_ADD31]](p1) :: (store (s8) into unknown-address + 32, addrspace 1)
; VI-NEXT: [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR25]](s16)
; VI-NEXT: G_STORE [[ANYEXT16]](s32), [[PTR_ADD33]](p1) :: (store (s8) into unknown-address + 33, addrspace 1)
; VI-NEXT: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR24]](s32)
; VI-NEXT: [[LSHR26:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC17]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD32]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD34:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD32]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR24]](s32), [[PTR_ADD32]](p1) :: (store (s8) into unknown-address + 34, addrspace 1)
; VI-NEXT: [[ANYEXT17:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR26]](s16)
; VI-NEXT: G_STORE [[ANYEXT17]](s32), [[PTR_ADD34]](p1) :: (store (s8) into unknown-address + 35, addrspace 1)
@@ -9218,10 +9218,10 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 1, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 1, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
@@ -9251,60 +9251,60 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; SI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; SI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; SI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; SI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; SI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; SI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; SI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
; SI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; SI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; SI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; SI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
; SI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; SI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; SI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
; SI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY11]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY11]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; SI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
; SI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[COPY12]], [[C]](s32)
- ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY12]](s32), [[PTR_ADD15]](p1) :: (store (s16) into unknown-address + 32, addrspace 1)
; SI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD16]](p1) :: (store (s16) into unknown-address + 34, addrspace 1)
;
@@ -9322,10 +9322,10 @@ body: |
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 2, addrspace 1)
;
; VI-LABEL: name: test_store_global_v9s32_align2
@@ -9342,60 +9342,60 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p1) :: (store (s16) into unknown-address + 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD2]](p1) :: (store (s16) into unknown-address + 6, addrspace 1)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY6]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store (s16) into unknown-address + 8, addrspace 1)
; VI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD4]](p1) :: (store (s16) into unknown-address + 10, addrspace 1)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
- ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY7]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p1) :: (store (s16) into unknown-address + 12, addrspace 1)
; VI-NEXT: G_STORE [[LSHR3]](s32), [[PTR_ADD6]](p1) :: (store (s16) into unknown-address + 14, addrspace 1)
; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; VI-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; VI-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
; VI-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY8]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY8]](s32), [[PTR_ADD7]](p1) :: (store (s16) into unknown-address + 16, addrspace 1)
; VI-NEXT: G_STORE [[LSHR4]](s32), [[PTR_ADD8]](p1) :: (store (s16) into unknown-address + 18, addrspace 1)
- ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; VI-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
; VI-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY9]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY9]](s32), [[PTR_ADD9]](p1) :: (store (s16) into unknown-address + 20, addrspace 1)
; VI-NEXT: G_STORE [[LSHR5]](s32), [[PTR_ADD10]](p1) :: (store (s16) into unknown-address + 22, addrspace 1)
- ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
; VI-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
; VI-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY10]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY10]](s32), [[PTR_ADD11]](p1) :: (store (s16) into unknown-address + 24, addrspace 1)
; VI-NEXT: G_STORE [[LSHR6]](s32), [[PTR_ADD12]](p1) :: (store (s16) into unknown-address + 26, addrspace 1)
- ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; VI-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
; VI-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY11]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY11]](s32), [[PTR_ADD13]](p1) :: (store (s16) into unknown-address + 28, addrspace 1)
; VI-NEXT: G_STORE [[LSHR7]](s32), [[PTR_ADD14]](p1) :: (store (s16) into unknown-address + 30, addrspace 1)
; VI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; VI-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; VI-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
; VI-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[COPY12]], [[C]](s32)
- ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD16:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY12]](s32), [[PTR_ADD15]](p1) :: (store (s16) into unknown-address + 32, addrspace 1)
; VI-NEXT: G_STORE [[LSHR8]](s32), [[PTR_ADD16]](p1) :: (store (s16) into unknown-address + 34, addrspace 1)
;
@@ -9413,10 +9413,10 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 2, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 2, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
@@ -9446,10 +9446,10 @@ body: |
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, addrspace 1)
;
; CI-LABEL: name: test_store_global_v9s32_align4
@@ -9466,10 +9466,10 @@ body: |
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, addrspace 1)
;
; VI-LABEL: name: test_store_global_v9s32_align4
@@ -9486,10 +9486,10 @@ body: |
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v9s32_align4
@@ -9506,10 +9506,10 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
@@ -9539,10 +9539,10 @@ body: |
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 8, addrspace 1)
;
; CI-LABEL: name: test_store_global_v9s32_align8
@@ -9559,10 +9559,10 @@ body: |
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_v9s32_align8
@@ -9579,10 +9579,10 @@ body: |
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 8, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v9s32_align8
@@ -9599,10 +9599,10 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), align 8, addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 8, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
@@ -9632,10 +9632,10 @@ body: |
; SI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 16, addrspace 1)
;
; CI-LABEL: name: test_store_global_v9s32_align16
@@ -9652,10 +9652,10 @@ body: |
; CI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; CI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
; CI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 16, addrspace 1)
;
; VI-LABEL: name: test_store_global_v9s32_align16
@@ -9672,10 +9672,10 @@ body: |
; VI-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; VI-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; VI-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 16, addrspace 1)
;
; GFX9-LABEL: name: test_store_global_v9s32_align16
@@ -9692,10 +9692,10 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX9-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX9-NEXT: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, addrspace 1)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; GFX9-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; GFX9-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p1) :: (store (s32) into unknown-address + 32, align 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
index 22d792a..a931c63 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
@@ -193,7 +193,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 4, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_v3s32
@@ -276,7 +276,7 @@ body: |
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C]](s32)
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; SI-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
;
@@ -290,7 +290,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C]](s16)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
@@ -341,7 +341,7 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -355,7 +355,7 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -379,18 +379,18 @@ body: |
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
;
@@ -404,18 +404,18 @@ body: |
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
; VI-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C2]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C3]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
@@ -573,7 +573,7 @@ body: |
; SI-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; SI-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s32>), [[COPY1]](p1) :: (store (<2 x s32>), align 16, addrspace 1)
; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; SI-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
;
; VI-LABEL: name: test_store_global_96
@@ -667,7 +667,7 @@ body: |
; SI-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[C1]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
;
@@ -688,7 +688,7 @@ body: |
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[OR]], [[C1]](s16)
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
@@ -828,12 +828,12 @@ body: |
; SI-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY4]], [[C3]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C5]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[C1]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
@@ -867,10 +867,10 @@ body: |
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[OR]], [[C1]](s16)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
@@ -919,7 +919,7 @@ body: |
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C3]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -952,7 +952,7 @@ body: |
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -999,7 +999,7 @@ body: |
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C3]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
;
@@ -1032,7 +1032,7 @@ body: |
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
@@ -1071,17 +1071,17 @@ body: |
; SI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C5]]
; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[C1]](s32)
; SI-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[COPY3]](s32)
- ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
+ ; SI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C6]](s64)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
;
@@ -1108,7 +1108,7 @@ body: |
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C5]]
@@ -1119,13 +1119,13 @@ body: |
; VI-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]]
; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[OR3]], [[C6]](s16)
; VI-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI-NEXT: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C6]](s16)
- ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C7]](s64)
+ ; VI-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C7]](s64)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI-NEXT: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
@@ -1165,7 +1165,7 @@ body: |
; SI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; SI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; SI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
;
@@ -1192,7 +1192,7 @@ body: |
; VI-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; VI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; VI-NEXT: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir
index 84608f6..088647e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir
@@ -27,6 +27,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 1
; GFX8-NEXT: $vgpr0 = COPY [[ASSERT_ZEXT]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -52,6 +53,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 7
; GFX8-NEXT: $vgpr0 = COPY [[ASSERT_ZEXT]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i7
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -76,19 +78,20 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX8-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX8-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX8-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; GFX8-NEXT: $vgpr0 = COPY [[OR]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i24
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 4, addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 2, align 2, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
@@ -112,6 +115,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 30
; GFX8-NEXT: $vgpr0 = COPY [[ASSERT_ZEXT]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i30
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -137,6 +141,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 31
; GFX8-NEXT: $vgpr0 = COPY [[ASSERT_ZEXT]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i31
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -161,6 +166,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[ZEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -183,6 +189,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[ZEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i32_i16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -205,6 +212,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[ZEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_i31_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -229,6 +237,7 @@ body: |
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
+ ;
; GFX6-LABEL: name: test_zextload_global_i64_i8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -253,6 +262,7 @@ body: |
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), addrspace 1)
; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
+ ;
; GFX6-LABEL: name: test_zextload_global_i64_i16
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -277,6 +287,7 @@ body: |
; GFX8-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load (s32), addrspace 1)
; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
+ ;
; GFX6-LABEL: name: test_zextload_global_i64_i32
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -301,13 +312,14 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[ZEXTLOAD]](s32)
+ ;
; GFX6-LABEL: name: test_zextload_global_s32_from_2_align1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
@@ -331,13 +343,14 @@ body: |
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s16), align 1, addrspace 1)
; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
+ ;
; GFX6-LABEL: name: test_zextload_global_s64_from_2_align1
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
; GFX6-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX6-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load (s8), addrspace 1)
; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX6-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p1) :: (load (s8) from unknown-address + 1, addrspace 1)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
@@ -361,6 +374,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(<2 x s16>) = G_ZEXTLOAD [[COPY]](p1) :: (load (<2 x s8>), addrspace 1)
; GFX8-NEXT: $vgpr0 = COPY [[ZEXTLOAD]](<2 x s16>)
+ ;
; GFX6-LABEL: name: test_zextload_global_v2i16_from_2
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -384,6 +398,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(<2 x s32>) = G_ZEXTLOAD [[COPY]](p1) :: (load (<2 x s8>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXTLOAD]](<2 x s32>)
+ ;
; GFX6-LABEL: name: test_zextload_global_v2i32_from_2
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -407,6 +422,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(<2 x s32>) = G_ZEXTLOAD [[COPY]](p1) :: (load (<2 x s16>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[ZEXTLOAD]](<2 x s32>)
+ ;
; GFX6-LABEL: name: test_zextload_global_v2i32_from_4
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -430,6 +446,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(<2 x s64>) = G_ZEXTLOAD [[COPY]](p1) :: (load (<2 x s16>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[ZEXTLOAD]](<2 x s64>)
+ ;
; GFX6-LABEL: name: test_zextload_global_v2i64_from_4
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -453,6 +470,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(<2 x s64>) = G_ZEXTLOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[ZEXTLOAD]](<2 x s64>)
+ ;
; GFX6-LABEL: name: test_zextload_global_v2i64_from_8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
@@ -476,6 +494,7 @@ body: |
; GFX8-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; GFX8-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s128) = G_ZEXTLOAD [[COPY]](p1) :: (load (s64), addrspace 1)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[ZEXTLOAD]](s128)
+ ;
; GFX6-LABEL: name: test_zextload_global_s128_8
; GFX6: liveins: $vgpr0_vgpr1
; GFX6-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
index 6603761..002c03aa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
@@ -545,11 +545,13 @@ define amdgpu_kernel void @workgroup_one_as_release() #0 {
; GFX10WGP-LABEL: name: workgroup_one_as_release
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: workgroup_one_as_release
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_release
@@ -578,12 +580,14 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel() #0 {
; GFX10WGP-LABEL: name: workgroup_one_as_acq_rel
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: workgroup_one_as_acq_rel
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_acq_rel
@@ -613,12 +617,14 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst() #0 {
; GFX10WGP-LABEL: name: workgroup_one_as_seq_cst
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 16240
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: workgroup_one_as_seq_cst
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_seq_cst
@@ -1293,12 +1299,14 @@ define amdgpu_kernel void @workgroup_release() #0 {
; GFX10WGP-LABEL: name: workgroup_release
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: S_ENDPGM 0
;
; GFX10CU-LABEL: name: workgroup_release
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_release
@@ -1330,6 +1338,7 @@ define amdgpu_kernel void @workgroup_acq_rel() #0 {
; GFX10WGP-LABEL: name: workgroup_acq_rel
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
@@ -1337,6 +1346,7 @@ define amdgpu_kernel void @workgroup_acq_rel() #0 {
; GFX10CU-LABEL: name: workgroup_acq_rel
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_acq_rel
@@ -1369,6 +1379,7 @@ define amdgpu_kernel void @workgroup_seq_cst() #0 {
; GFX10WGP-LABEL: name: workgroup_seq_cst
; GFX10WGP: bb.0.entry:
; GFX10WGP-NEXT: S_WAITCNT_soft 112
+ ; GFX10WGP-NEXT: S_WAITCNT_lds_direct
; GFX10WGP-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10WGP-NEXT: BUFFER_GL0_INV implicit $exec
; GFX10WGP-NEXT: S_ENDPGM 0
@@ -1376,6 +1387,7 @@ define amdgpu_kernel void @workgroup_seq_cst() #0 {
; GFX10CU-LABEL: name: workgroup_seq_cst
; GFX10CU: bb.0.entry:
; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_lds_direct
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/minmaxabs-i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/minmaxabs-i64.ll
new file mode 100644
index 0000000..43c8f46
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/minmaxabs-i64.ll
@@ -0,0 +1,192 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck %s
+
+declare i64 @llvm.umin.i64(i64, i64)
+declare i64 @llvm.umax.i64(i64, i64)
+declare i64 @llvm.smin.i64(i64, i64)
+declare i64 @llvm.smax.i64(i64, i64)
+declare i64 @llvm.abs.i64(i64, i1)
+
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
+
+define i64 @test_umin_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: test_umin_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define i64 @test_umax_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: test_umax_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define i64 @test_smin_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: test_smin_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define i64 @test_smax_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: test_smax_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define <4 x i64> @test_umin_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: test_umin_v4i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_min_u64 v[0:1], v[0:1], v[8:9]
+; CHECK-NEXT: v_min_u64 v[2:3], v[2:3], v[10:11]
+; CHECK-NEXT: v_min_u64 v[4:5], v[4:5], v[12:13]
+; CHECK-NEXT: v_min_u64 v[6:7], v[6:7], v[14:15]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @test_umax_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: test_umax_v4i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_max_u64 v[0:1], v[0:1], v[8:9]
+; CHECK-NEXT: v_max_u64 v[2:3], v[2:3], v[10:11]
+; CHECK-NEXT: v_max_u64 v[4:5], v[4:5], v[12:13]
+; CHECK-NEXT: v_max_u64 v[6:7], v[6:7], v[14:15]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @test_smin_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: test_smin_v4i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_min_i64 v[0:1], v[0:1], v[8:9]
+; CHECK-NEXT: v_min_i64 v[2:3], v[2:3], v[10:11]
+; CHECK-NEXT: v_min_i64 v[4:5], v[4:5], v[12:13]
+; CHECK-NEXT: v_min_i64 v[6:7], v[6:7], v[14:15]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @test_smax_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: test_smax_v4i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_max_i64 v[0:1], v[0:1], v[8:9]
+; CHECK-NEXT: v_max_i64 v[2:3], v[2:3], v[10:11]
+; CHECK-NEXT: v_max_i64 v[4:5], v[4:5], v[12:13]
+; CHECK-NEXT: v_max_i64 v[6:7], v[6:7], v[14:15]
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
+ ret <4 x i64> %r
+}
+
+define i64 @test_abs_i64(i64 %a) {
+; CHECK-LABEL: test_abs_i64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_xor_b32_e32 v0, v0, v2
+; CHECK-NEXT: v_xor_b32_e32 v1, v1, v2
+; CHECK-NEXT: s_set_pc_i64 s[30:31]
+ %r = call i64 @llvm.abs.i64(i64 %a, i1 0)
+ ret i64 %r
+}
+
+define amdgpu_ps i64 @test_umin_i64_s(i64 inreg %a, i64 inreg %b) {
+; CHECK-LABEL: test_umin_i64_s:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_min_u64 v[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %r = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define amdgpu_ps i64 @test_umax_i64_s(i64 inreg %a, i64 inreg %b) {
+; CHECK-LABEL: test_umax_i64_s:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_max_u64 v[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %r = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define amdgpu_ps i64 @test_smin_i64_s(i64 inreg %a, i64 inreg %b) {
+; CHECK-LABEL: test_smin_i64_s:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_min_i64 v[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %r = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define amdgpu_ps i64 @test_smax_i64_s(i64 inreg %a, i64 inreg %b) {
+; CHECK-LABEL: test_smax_i64_s:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_max_i64 v[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %r = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+ ret i64 %r
+}
+
+define amdgpu_ps i64 @test_abs_i64_s(i64 inreg %a) {
+; CHECK-LABEL: test_abs_i64_s:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_ashr_i32 s2, s1, 31
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT: s_mov_b32 s3, s2
+; CHECK-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; CHECK-NEXT: ; return to shader part epilog
+ %r = call i64 @llvm.abs.i64(i64 %a, i1 0)
+ ret i64 %r
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index 6cc192c..3daae989 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -801,15 +801,15 @@ define i96 @v_mul_i96(i96 %num, i96 %den) {
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, v1
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mul_lo_u32 v0, v6, v5
-; GFX1250-NEXT: v_mad_co_u64_u32 v[8:9], null, v7, v4, v[0:1]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[0:1], null, v6, v3, 0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[8:9], null, v2, v3, v[8:9]
-; GFX1250-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v11, v8
+; GFX1250-NEXT: v_mul_lo_u32 v0, v7, v4
+; GFX1250-NEXT: v_mad_u32 v5, v6, v5, v0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v6, v3, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mad_u32 v9, v2, v3, v5
+; GFX1250-NEXT: v_mov_b32_e32 v8, v1
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[4:5], null, v6, v4, v[10:11]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[2:3], null, v7, v3, v[4:5]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[4:5], v6, v4, v[8:9]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[2:3], v7, v3, v[4:5]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v3
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
@@ -1206,11 +1206,11 @@ define i128 @v_mul_i128(i128 %num, i128 %den) {
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v8, v0 :: v_dual_mov_b32 v9, v1
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[0:1], null, v8, v6, 0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[10:11], null, v9, v5, v[0:1]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[0:1], null, v8, v4, 0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v8, v6, 0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[10:11], v9, v5, v[0:1]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v8, v4, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[10:11], null, v2, v4, v[10:11]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[10:11], v2, v4, v[10:11]
; GFX1250-NEXT: v_mov_b32_e32 v12, v1
; GFX1250-NEXT: v_mul_lo_u32 v1, v9, v6
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1220,15 +1220,13 @@ define i128 @v_mul_i128(i128 %num, i128 %den) {
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mad_co_u64_u32 v[6:7], s0, v9, v4, v[12:13]
; GFX1250-NEXT: v_add_co_ci_u32_e64 v8, null, v11, v8, s0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v8, null, v8, v1, vcc_lo
-; GFX1250-NEXT: v_mov_b32_e32 v1, v6
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[8:9], null, v2, v5, v[8:9]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v1, null, v8, v1, vcc_lo
+; GFX1250-NEXT: v_mad_u32 v1, v2, v5, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_mov_b32_e32 v2, v7
-; GFX1250-NEXT: v_mad_co_u64_u32 v[4:5], null, v3, v4, v[8:9]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_mov_b32_e32 v3, v4
+; GFX1250-NEXT: v_mad_u32 v3, v3, v4, v1
+; GFX1250-NEXT: v_mov_b32_e32 v1, v6
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = mul i128 %num, %den
ret i128 %result
@@ -2856,90 +2854,89 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v0, v14, 0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], null, v0, v12, 0
-; GFX1250-NEXT: v_mul_lo_u32 v26, v6, v9
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v0, v14, 0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[18:19], v0, v12, 0
+; GFX1250-NEXT: v_mul_lo_u32 v27, v5, v10
; GFX1250-NEXT: v_mul_lo_u32 v29, v3, v12
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v1, v13, v[16:17]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v1, v13, v[16:17]
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], s0, v1, v11, v[18:19]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_cndmask_b32_e64 v20, 0, 1, s0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v2, v12, v[16:17]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v2, v12, v[16:17]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v2, v10, v[18:19]
; GFX1250-NEXT: v_add_co_ci_u32_e64 v22, null, 0, v20, vcc_lo
-; GFX1250-NEXT: v_mad_co_u64_u32 v[20:21], null, v0, v10, 0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[20:21], v0, v10, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v3, v11, v[16:17]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v3, v11, v[16:17]
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v3, v9, v[18:19]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v24, null, 0, v22, vcc_lo
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v4, v10, v[16:17]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v4, v10, v[16:17]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v4, v8, v[18:19]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v5, v9, v[16:17]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[22:23], null, v6, v8, v[16:17]
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v26, null, 0, v24, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v5, v9, v[16:17]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[22:23], v6, v8, v[16:17]
; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], s0, v1, v9, v[20:21]
-; GFX1250-NEXT: v_mov_b32_e32 v20, v19
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v6, null, 0, v24, vcc_lo
-; GFX1250-NEXT: v_cndmask_b32_e64 v19, 0, 1, s0
-; GFX1250-NEXT: v_mov_b32_e32 v21, v22
-; GFX1250-NEXT: v_mul_lo_u32 v22, v5, v10
-; GFX1250-NEXT: v_mad_co_u64_u32 v[24:25], vcc_lo, v2, v8, v[16:17]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v19, vcc_lo
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], s0, v0, v13, v[20:21]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX1250-NEXT: v_dual_mov_b32 v21, v18 :: v_dual_mov_b32 v20, v25
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX1250-NEXT: v_dual_mov_b32 v20, v19 :: v_dual_mov_b32 v21, v22
+; GFX1250-NEXT: v_mul_lo_u32 v22, v6, v9
+; GFX1250-NEXT: v_cndmask_b32_e64 v6, 0, 1, s0
+; GFX1250-NEXT: v_mad_co_u64_u32 v[24:25], s0, v2, v8, v[16:17]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_mad_co_u64_u32 v[20:21], vcc_lo, v0, v13, v[20:21]
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v6, null, 0, v6, s0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], s0, v1, v12, v[20:21]
+; GFX1250-NEXT: v_dual_mov_b32 v20, v25 :: v_dual_mov_b32 v21, v18
; GFX1250-NEXT: v_mul_lo_u32 v25, v4, v11
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], vcc_lo, v1, v12, v[16:17]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], s2, v0, v11, v[20:21]
; GFX1250-NEXT: v_cndmask_b32_e64 v28, 0, 1, s2
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_mad_co_u64_u32 v[20:21], s1, v2, v11, v[16:17]
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[16:17], v0, v8, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], s2, v1, v10, v[18:19]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[16:17], null, v0, v8, 0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_mad_co_u64_u32 v[10:11], s3, v3, v10, v[20:21]
; GFX1250-NEXT: v_mul_lo_u32 v20, v2, v13
; GFX1250-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v28, s2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX1250-NEXT: v_mad_co_u64_u32 v[12:13], s2, v2, v9, v[18:19]
; GFX1250-NEXT: v_dual_mov_b32 v18, v17 :: v_dual_mov_b32 v19, v24
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v21, s2
; GFX1250-NEXT: v_mad_co_u64_u32 v[10:11], s4, v4, v9, v[10:11]
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v21, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-NEXT: v_mad_co_u64_u32 v[18:19], s6, v0, v9, v[18:19]
; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v15
; GFX1250-NEXT: v_mad_co_u64_u32 v[12:13], s2, v3, v8, v[12:13]
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s6
; GFX1250-NEXT: v_mul_lo_u32 v9, v1, v14
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v2, s2
; GFX1250-NEXT: v_mad_co_u64_u32 v[10:11], s5, v5, v8, v[10:11]
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v2, s2
; GFX1250-NEXT: v_mad_co_u64_u32 v[14:15], s2, v1, v8, v[18:19]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v3, s2, v3, v12, s2
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v4, s2, v27, v13, s2
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v4, s2, v6, v13, s2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v5, s2, v2, v10, s2
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v6, s2, v6, v11, s2
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v6, s2, v26, v11, s2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v23, v0, s2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v15 :: v_dual_mov_b32 v1, v14
; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v9, s5
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v20, s4
; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v29, s3
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v25, s1
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v22, vcc_lo
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v27, s0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v26, s0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[8:9], null, v7, v8, v[0:1]
-; GFX1250-NEXT: v_dual_mov_b32 v0, v16 :: v_dual_mov_b32 v1, v14
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1250-NEXT: v_dual_mov_b32 v2, v15 :: v_dual_mov_b32 v7, v8
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v0, null, v0, v22, vcc_lo
+; GFX1250-NEXT: v_mad_u32 v7, v7, v8, v0
+; GFX1250-NEXT: v_mov_b32_e32 v0, v16
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = mul i256 %num, %den
ret i256 %result
@@ -3004,7 +3001,7 @@ define amdgpu_ps void @s_mul_u64_zext_with_vregs(ptr addrspace(1) %out, ptr addr
; GFX1250: ; %bb.0:
; GFX1250-NEXT: global_load_b32 v2, v[2:3], off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_mad_co_u64_u32 v[2:3], null, 0x50, v2, 0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[2:3], 0x50, v2, 0
; GFX1250-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1250-NEXT: s_endpgm
%val = load i32, ptr addrspace(1) %in, align 4
@@ -3195,7 +3192,7 @@ define amdgpu_ps void @s_mul_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addr
; GFX1250: ; %bb.0:
; GFX1250-NEXT: global_load_b32 v2, v[2:3], off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_mad_co_i64_i32 v[2:3], null, 0x50, v2, 0
+; GFX1250-NEXT: v_mad_nc_i64_i32 v[2:3], 0x50, v2, 0
; GFX1250-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX1250-NEXT: s_endpgm
%val = load i32, ptr addrspace(1) %in, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll
index f88c67a..89681e7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll
@@ -699,7 +699,7 @@ define amdgpu_ps void @s_buffer_load_i256_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(s128), [[UV1:%[0-9]+]]:vgpr(s128) = G_UNMERGE_VALUES [[MV]](s256)
; GFX7-NEXT: G_STORE [[UV]](s128), [[DEF]](p1) :: (store (s128) into `ptr addrspace(1) poison`, align 8, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](s128), [[PTR_ADD]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 16, align 8, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -723,7 +723,7 @@ define amdgpu_ps void @s_buffer_load_i256_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](s128), [[COPY5]](p1) :: (store (s128) into `ptr addrspace(1) poison`, align 8, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](s128), [[COPY6]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 16, align 8, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -755,13 +755,13 @@ define amdgpu_ps void @s_buffer_load_i512_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(s128), [[UV1:%[0-9]+]]:vgpr(s128), [[UV2:%[0-9]+]]:vgpr(s128), [[UV3:%[0-9]+]]:vgpr(s128) = G_UNMERGE_VALUES [[MV]](s512)
; GFX7-NEXT: G_STORE [[UV]](s128), [[DEF]](p1) :: (store (s128) into `ptr addrspace(1) poison`, align 8, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](s128), [[PTR_ADD]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 16, align 8, addrspace 1)
; GFX7-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX7-NEXT: G_STORE [[UV2]](s128), [[PTR_ADD1]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 32, align 8, addrspace 1)
; GFX7-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX7-NEXT: G_STORE [[UV3]](s128), [[PTR_ADD2]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 48, align 8, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -787,15 +787,15 @@ define amdgpu_ps void @s_buffer_load_i512_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](s128), [[COPY5]](p1) :: (store (s128) into `ptr addrspace(1) poison`, align 8, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](s128), [[COPY6]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 16, align 8, addrspace 1)
; GFX12-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD1]](p1)
; GFX12-NEXT: G_STORE [[UV2]](s128), [[COPY7]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 32, align 8, addrspace 1)
; GFX12-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD2]](p1)
; GFX12-NEXT: G_STORE [[UV3]](s128), [[COPY8]](p1) :: (store (s128) into `ptr addrspace(1) poison` + 48, align 8, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -825,7 +825,7 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
; GFX7-NEXT: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -849,7 +849,7 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<8 x s16>), [[COPY5]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<8 x s16>), [[COPY6]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -881,13 +881,13 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>), [[UV2:%[0-9]+]]:vgpr(<8 x s16>), [[UV3:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<32 x s16>)
; GFX7-NEXT: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX7-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX7-NEXT: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX7-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX7-NEXT: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -913,15 +913,15 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<8 x s16>), [[COPY5]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<8 x s16>), [[COPY6]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX12-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD1]](p1)
; GFX12-NEXT: G_STORE [[UV2]](<8 x s16>), [[COPY7]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX12-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD2]](p1)
; GFX12-NEXT: G_STORE [[UV3]](<8 x s16>), [[COPY8]](p1) :: (store (<8 x s16>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -951,7 +951,7 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
; GFX7-NEXT: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -975,7 +975,7 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<2 x s64>), [[COPY5]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<2 x s64>), [[COPY6]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -1007,13 +1007,13 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>), [[UV2:%[0-9]+]]:vgpr(<2 x s64>), [[UV3:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>)
; GFX7-NEXT: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX7-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX7-NEXT: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX7-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX7-NEXT: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -1039,15 +1039,15 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<2 x s64>), [[COPY5]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<2 x s64>), [[COPY6]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX12-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD1]](p1)
; GFX12-NEXT: G_STORE [[UV2]](<2 x s64>), [[COPY7]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX12-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD2]](p1)
; GFX12-NEXT: G_STORE [[UV3]](<2 x s64>), [[COPY8]](p1) :: (store (<2 x s64>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -1077,7 +1077,7 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x p1>)
; GFX7-NEXT: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -1101,7 +1101,7 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<2 x p1>), [[COPY5]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, align 32, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<2 x p1>), [[COPY6]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 16, basealign 32, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
@@ -1133,13 +1133,13 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX7-NEXT: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>), [[UV2:%[0-9]+]]:vgpr(<2 x p1>), [[UV3:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x p1>)
; GFX7-NEXT: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX7-NEXT: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX7-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX7-NEXT: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX7-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX7-NEXT: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
;
@@ -1165,15 +1165,15 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr(p1) = COPY [[DEF]](p1)
; GFX12-NEXT: G_STORE [[UV]](<2 x p1>), [[COPY5]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison`, align 64, addrspace 1)
; GFX12-NEXT: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
- ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
+ ; GFX12-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C2]](s64)
; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD]](p1)
; GFX12-NEXT: G_STORE [[UV1]](<2 x p1>), [[COPY6]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 16, basealign 64, addrspace 1)
; GFX12-NEXT: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
- ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
+ ; GFX12-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C3]](s64)
; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD1]](p1)
; GFX12-NEXT: G_STORE [[UV2]](<2 x p1>), [[COPY7]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 32, align 32, basealign 64, addrspace 1)
; GFX12-NEXT: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
- ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
+ ; GFX12-NEXT: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = nuw inbounds G_PTR_ADD [[DEF]], [[C4]](s64)
; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr(p1) = COPY [[PTR_ADD2]](p1)
; GFX12-NEXT: G_STORE [[UV3]](<2 x p1>), [[COPY8]](p1) :: (store (<2 x p1>) into `ptr addrspace(1) poison` + 48, basealign 64, addrspace 1)
; GFX12-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
index bf1dcad..1b64099 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -121,7 +121,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY1]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v8i32, align 32, addrspace 1)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v8i32 + 16, basealign 32, addrspace 1)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
@@ -154,7 +154,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY1]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v4i64, align 32, addrspace 1)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v4i64 + 16, basealign 32, addrspace 1)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s64), [[UV1:%[0-9]+]]:vgpr(s64), [[UV2:%[0-9]+]]:vgpr(s64), [[UV3:%[0-9]+]]:vgpr(s64) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
@@ -194,13 +194,13 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY1]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32, align 64, addrspace 1)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 16, basealign 64, addrspace 1)
; GCN-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; GCN-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 1)
; GCN-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; GCN-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 48, basealign 64, addrspace 1)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>)
@@ -240,13 +240,13 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY1]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64, align 64, addrspace 1)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 16, basealign 64, addrspace 1)
; GCN-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; GCN-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 1)
; GCN-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; GCN-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 48, basealign 64, addrspace 1)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>), [[LOAD2]](<2 x s64>), [[LOAD3]](<2 x s64>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s64), [[UV1:%[0-9]+]]:vgpr(s64), [[UV2:%[0-9]+]]:vgpr(s64), [[UV3:%[0-9]+]]:vgpr(s64), [[UV4:%[0-9]+]]:vgpr(s64), [[UV5:%[0-9]+]]:vgpr(s64), [[UV6:%[0-9]+]]:vgpr(s64), [[UV7:%[0-9]+]]:vgpr(s64) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>)
@@ -370,7 +370,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY1]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v8i32, align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v8i32 + 16, basealign 32, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
@@ -402,7 +402,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(s128) = G_LOAD [[COPY1]](p4) :: (load (s128) from %ir.constant.not.uniform, align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(s128) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
; GCN-NEXT: [[MV:%[0-9]+]]:vgpr(s256) = G_MERGE_VALUES [[LOAD]](s128), [[LOAD1]](s128)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[MV]](s256)
@@ -435,7 +435,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[COPY1]](p4) :: (load (<8 x s16>) from %ir.constant.not.uniform, align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (load (<8 x s16>) from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s16>) = G_CONCAT_VECTORS [[LOAD]](<8 x s16>), [[LOAD1]](<8 x s16>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(<2 x s16>), [[UV1:%[0-9]+]]:vgpr(<2 x s16>), [[UV2:%[0-9]+]]:vgpr(<2 x s16>), [[UV3:%[0-9]+]]:vgpr(<2 x s16>), [[UV4:%[0-9]+]]:vgpr(<2 x s16>), [[UV5:%[0-9]+]]:vgpr(<2 x s16>), [[UV6:%[0-9]+]]:vgpr(<2 x s16>), [[UV7:%[0-9]+]]:vgpr(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
@@ -467,7 +467,7 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY1]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v4i64, align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v4i64 + 16, basealign 32, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s64), [[UV1:%[0-9]+]]:vgpr(s64), [[UV2:%[0-9]+]]:vgpr(s64), [[UV3:%[0-9]+]]:vgpr(s64) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
@@ -507,13 +507,13 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY1]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32, align 64, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 16, basealign 64, addrspace 4)
; GCN-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; GCN-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 4)
; GCN-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; GCN-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 48, basealign 64, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>)
@@ -553,13 +553,13 @@ body: |
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY1]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64, align 64, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 16, basealign 64, addrspace 4)
; GCN-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; GCN-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; GCN-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 4)
; GCN-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; GCN-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; GCN-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 48, basealign 64, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>), [[LOAD2]](<2 x s64>), [[LOAD3]](<2 x s64>)
; GCN-NEXT: [[UV:%[0-9]+]]:vgpr(s64), [[UV1:%[0-9]+]]:vgpr(s64), [[UV2:%[0-9]+]]:vgpr(s64), [[UV3:%[0-9]+]]:vgpr(s64), [[UV4:%[0-9]+]]:vgpr(s64), [[UV5:%[0-9]+]]:vgpr(s64), [[UV6:%[0-9]+]]:vgpr(s64), [[UV7:%[0-9]+]]:vgpr(s64) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>)
@@ -905,7 +905,7 @@ body: |
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr(p4) = COPY $vgpr0_vgpr1
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
%0:_(p4) = COPY $vgpr0_vgpr1
@@ -933,7 +933,7 @@ body: |
; GCN-NEXT: [[PHI:%[0-9]+]]:vgpr(p4) = G_PHI [[COPY]](p4), %bb.0, %3(p4), %bb.1
; GCN-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PHI]](p4) :: (load (<4 x s32>), align 32, addrspace 4)
; GCN-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PHI]], [[C]](s64)
+ ; GCN-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD [[PHI]], [[C]](s64)
; GCN-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, addrspace 4)
; GCN-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr(p4) = COPY [[COPY1]](p4)
@@ -967,7 +967,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load (<2 x s32>), align 4, addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (s32) from unknown-address + 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; GFX7-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -998,7 +998,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load (<2 x s32>), addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (s32) from unknown-address + 8, align 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; GFX7-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -1057,7 +1057,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load (<4 x s16>), align 4, addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (<2 x s16>) from unknown-address + 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(<2 x s16>), [[UV1:%[0-9]+]]:sgpr(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; GFX7-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:sgpr(<6 x s16>) = G_CONCAT_VECTORS [[UV]](<2 x s16>), [[UV1]](<2 x s16>), [[LOAD1]](<2 x s16>)
@@ -1088,7 +1088,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load (<4 x s16>), addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (<2 x s16>) from unknown-address + 8, align 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(<2 x s16>), [[UV1:%[0-9]+]]:sgpr(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; GFX7-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:sgpr(<6 x s16>) = G_CONCAT_VECTORS [[UV]](<2 x s16>), [[UV1]](<2 x s16>), [[LOAD1]](<2 x s16>)
@@ -1147,7 +1147,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load (s64), align 4, addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (s32) from unknown-address + 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
; GFX7-NEXT: [[MV:%[0-9]+]]:sgpr(s96) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
@@ -1178,7 +1178,7 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load (s64), addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load (s32) from unknown-address + 8, align 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
; GFX7-NEXT: [[MV:%[0-9]+]]:sgpr(s96) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
index d15919f..2177cd7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
@@ -36,11 +36,12 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (load (<2 x s32>), addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 8, align 8, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; GFX7-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; GFX7-NEXT: $sgpr0_sgpr1_sgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
; GFX12-LABEL: name: split_smrd_load_range
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
@@ -66,11 +67,12 @@ body: |
; GFX7-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7-NEXT: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (load (<2 x s32>), !tbaa !2, addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 8, align 8, !tbaa !2, addrspace 4)
; GFX7-NEXT: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; GFX7-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<3 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[LOAD1]](s32)
; GFX7-NEXT: $sgpr0_sgpr1_sgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
; GFX12-LABEL: name: split_smrd_load_tbaa
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uniform-load-noclobber.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uniform-load-noclobber.mir
index 8159f1b..efdf4b7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uniform-load-noclobber.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-uniform-load-noclobber.mir
@@ -17,13 +17,13 @@ body: |
; GFX7-NEXT: %out_addr:sgpr(p1) = COPY $sgpr2_sgpr3
; GFX7-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX7-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from unknown-address + 16, align 4, addrspace 1)
; GFX7-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C1]](s64)
+ ; GFX7-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C1]](s64)
; GFX7-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from unknown-address + 32, align 4, addrspace 1)
; GFX7-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C2]](s64)
+ ; GFX7-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C2]](s64)
; GFX7-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from unknown-address + 48, align 4, addrspace 1)
; GFX7-NEXT: %load:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX7-NEXT: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>), %load8_11:vgpr(<4 x s32>), %load12_15:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<16 x s32>)
@@ -38,6 +38,7 @@ body: |
; GFX7-NEXT: %out_addr_plus_48:sgpr(p1) = G_PTR_ADD %out_addr, %cst48(s64)
; GFX7-NEXT: G_STORE %load12_15(<4 x s32>), %out_addr_plus_48(p1) :: (store (<4 x s32>), align 4, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
+ ;
; GFX1010-LABEL: name: test_uniform_load_without_noclobber
; GFX1010: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GFX1010-NEXT: {{ $}}
@@ -46,13 +47,13 @@ body: |
; GFX1010-NEXT: [[COPY:%[0-9]+]]:vgpr(p1) = COPY %in_addr(p1)
; GFX1010-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX1010-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GFX1010-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C]](s64)
+ ; GFX1010-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C]](s64)
; GFX1010-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from unknown-address + 16, align 4, addrspace 1)
; GFX1010-NEXT: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
- ; GFX1010-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C1]](s64)
+ ; GFX1010-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C1]](s64)
; GFX1010-NEXT: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from unknown-address + 32, align 4, addrspace 1)
; GFX1010-NEXT: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
- ; GFX1010-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C2]](s64)
+ ; GFX1010-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = nuw inbounds G_PTR_ADD %in_addr, [[C2]](s64)
; GFX1010-NEXT: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from unknown-address + 48, align 4, addrspace 1)
; GFX1010-NEXT: %load:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX1010-NEXT: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>), %load8_11:vgpr(<4 x s32>), %load12_15:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<16 x s32>)
@@ -103,7 +104,7 @@ body: |
; GFX7-NEXT: %out:sgpr(p1) = COPY $sgpr2_sgpr3
; GFX7-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (<4 x s32>), align 1, addrspace 4)
; GFX7-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD %ptr, [[C]](s64)
+ ; GFX7-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; GFX7-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, align 1, addrspace 4)
; GFX7-NEXT: %load:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX7-NEXT: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<8 x s32>)
@@ -112,6 +113,7 @@ body: |
; GFX7-NEXT: %out_plus_16:sgpr(p1) = G_PTR_ADD %out, %cst_16(s64)
; GFX7-NEXT: G_STORE %load4_7(<4 x s32>), %out_plus_16(p1) :: (store (<4 x s32>), align 32, addrspace 1)
; GFX7-NEXT: S_ENDPGM 0
+ ;
; GFX1010-LABEL: name: test_s_load_constant_v8i32_align1
; GFX1010: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GFX1010-NEXT: {{ $}}
@@ -120,7 +122,7 @@ body: |
; GFX1010-NEXT: [[COPY:%[0-9]+]]:vgpr(p4) = COPY %ptr(p4)
; GFX1010-NEXT: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (<4 x s32>), align 1, addrspace 4)
; GFX1010-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
- ; GFX1010-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD %ptr, [[C]](s64)
+ ; GFX1010-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64)
; GFX1010-NEXT: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, align 1, addrspace 4)
; GFX1010-NEXT: %load:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX1010-NEXT: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<8 x s32>)
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
index b992506..00c6656 100644
--- a/llvm/test/CodeGen/AMDGPU/add-max.ll
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -5,9 +5,7 @@
define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -18,9 +16,7 @@ define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_svv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v1
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, v1
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -29,12 +25,17 @@ define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
-; GCN-LABEL: add_max_u32_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_add_co_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_ssv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, s0, s1, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_ssv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
%ret = bitcast i32 %max to float
@@ -58,9 +59,7 @@ define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c
define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
; GCN-LABEL: add_max_u32_vsi:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 4, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, s0, 4
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 4)
@@ -71,9 +70,7 @@ define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
; GCN-LABEL: add_max_u32_svl:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 0x64, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, 0x64
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 100)
@@ -82,12 +79,17 @@ define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
}
define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
-; GCN-LABEL: add_max_u32_slv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_addk_co_i32 s0, 0x64
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_slv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, 0x64, s0, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_slv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_addk_co_i32 s0, 0x64
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, 100
%max = call i32 @llvm.umax.i32(i32 %add, i32 %b)
%ret = bitcast i32 %max to float
@@ -97,9 +99,7 @@ define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
@@ -110,9 +110,7 @@ define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
@@ -123,9 +121,7 @@ define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
diff --git a/llvm/test/CodeGen/AMDGPU/bitop3.ll b/llvm/test/CodeGen/AMDGPU/bitop3.ll
index eb149a93..ba818f6 100644
--- a/llvm/test/CodeGen/AMDGPU/bitop3.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitop3.ll
@@ -1,6 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel=0 -mtriple=amdgcn-- -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950,GFX950-SDAG %s
; RUN: llc -global-isel -mtriple=amdgcn-- -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950,GFX950-GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-- -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-SDAG,GFX1250-SDAG-FAKE16,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-- -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-SDAG,GFX1250-SDAG-TRUE16,GFX1250-TRUE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn-- -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-GISEL,GFX1250-GISEL-FAKE16,GFX1250-FAKE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn-- -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-GISEL,GFX1250-GISEL-TRUE16,GFX1250-TRUE16 %s
; ========= Single bit functions =========
@@ -55,6 +59,18 @@ define amdgpu_ps float @not_and_and_and(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v2, v0 bitop3:0xc
; GFX950-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: not_and_and_and:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:8
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: not_and_and_and:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, v0, v2, v0 bitop3:0xc
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%and1 = and i32 %nota, %c
%and2 = and i32 %and1, %b
@@ -87,6 +103,19 @@ define amdgpu_ps float @and_not_and_and(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_and_b32_e32 v0, v0, v2
; GFX950-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: and_not_and_and:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x20
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: and_not_and_and:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v1, v1
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%notb = xor i32 %b, -1
%and1 = and i32 %a, %c
%and2 = and i32 %and1, %notb
@@ -105,6 +134,18 @@ define amdgpu_ps float @and_and_not_and(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v2, v0 bitop3:0x30
; GFX950-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: and_and_not_and:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x40
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: and_and_not_and:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, v0, v2, v0 bitop3:0x30
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%notc = xor i32 %c, -1
%and1 = and i32 %a, %notc
%and2 = and i32 %and1, %b
@@ -113,15 +154,10 @@ define amdgpu_ps float @and_and_not_and(i32 %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @and_and_and(i32 %a, i32 %b, i32 %c) {
-; GFX950-SDAG-LABEL: and_and_and:
-; GFX950-SDAG: ; %bb.0:
-; GFX950-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x80
-; GFX950-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX950-GISEL-LABEL: and_and_and:
-; GFX950-GISEL: ; %bb.0:
-; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x80
-; GFX950-GISEL-NEXT: ; return to shader part epilog
+; GCN-LABEL: and_and_and:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x80
+; GCN-NEXT: ; return to shader part epilog
%and1 = and i32 %a, %c
%and2 = and i32 %and1, %b
%ret_cast = bitcast i32 %and2 to float
@@ -131,15 +167,10 @@ define amdgpu_ps float @and_and_and(i32 %a, i32 %b, i32 %c) {
; ========= Multi bit functions =========
define amdgpu_ps float @test_12(i32 %a, i32 %b) {
-; GFX950-SDAG-LABEL: test_12:
-; GFX950-SDAG: ; %bb.0:
-; GFX950-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v0 bitop3:0xc
-; GFX950-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX950-GISEL-LABEL: test_12:
-; GFX950-GISEL: ; %bb.0:
-; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v0 bitop3:0xc
-; GFX950-GISEL-NEXT: ; return to shader part epilog
+; GCN-LABEL: test_12:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_bitop3_b32 v0, v0, v1, v0 bitop3:0xc
+; GCN-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%and1 = and i32 %nota, %b
%ret_cast = bitcast i32 %and1 to float
@@ -158,6 +189,19 @@ define amdgpu_ps float @test_63(i32 %a, i32 %b) {
; GFX950-GISEL-NEXT: v_not_b32_e32 v1, v1
; GFX950-GISEL-NEXT: v_or_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: test_63:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v0 bitop3:0x3f
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_63:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v0, v0
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v1, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%notb = xor i32 %b, -1
%or = or i32 %nota, %notb
@@ -190,6 +234,19 @@ define amdgpu_ps float @test_126(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX950-GISEL-NEXT: v_or_b32_e32 v0, v1, v0
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: test_126:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v2, v1 bitop3:0x7e
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_126:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%xor1 = xor i32 %a, %b
%xor2 = xor i32 %a, %c
%or = or i32 %xor1, %xor2
@@ -216,6 +273,21 @@ define amdgpu_ps float @test_12_src_overflow(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_and_b32_e32 v2, v3, v4
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0xc8
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: test_12_src_overflow:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, v1, v0 bitop3:0xc
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_12_src_overflow:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v3, v0
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v4, v2
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, v0, v2, v0 bitop3:0xc
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v3, v4
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0xc8
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%notc = xor i32 %c, -1
%and1 = and i32 %nota, %c
@@ -249,6 +321,29 @@ define amdgpu_ps float @test_100_src_overflow(i32 %a, i32 %b, i32 %c) {
; GFX950-GISEL-NEXT: v_and_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: v_or3_b32 v0, v3, v4, v0
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: test_100_src_overflow:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v3, v1, v2, v0 bitop3:0x10
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v4, v0, v2, v1 bitop3:0x40
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v1, v2, v0 bitop3:0x20
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_or3_b32 v0, v3, v4, v0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_100_src_overflow:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v3, v2, v0, v2 bitop3:3
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v4, v0, v1, v0 bitop3:0x30
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v1, v0
+; GFX1250-GISEL-NEXT: v_not_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v4, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v5
+; GFX1250-GISEL-NEXT: v_or3_b32 v0, v1, v2, v0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%or1 = or i32 %c, %a
%not1 = xor i32 %or1, -1
%and1 = and i32 %b, %not1
@@ -267,11 +362,16 @@ define amdgpu_ps float @test_100_src_overflow(i32 %a, i32 %b, i32 %c) {
; ========= Ternary logical operations take precedence =========
define amdgpu_ps float @test_xor3(i32 %a, i32 %b, i32 %c) {
-; GCN-LABEL: test_xor3:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
-; GCN-NEXT: v_xor_b32_e32 v0, v0, v2
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: test_xor3:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX950-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: test_xor3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX1250-NEXT: ; return to shader part epilog
%xor1 = xor i32 %a, %b
%xor2 = xor i32 %xor1, %c
%ret_cast = bitcast i32 %xor2 to float
@@ -303,12 +403,20 @@ define amdgpu_ps float @test_and_or(i32 %a, i32 %b, i32 %c) {
; ========= Uniform cases =========
define amdgpu_ps float @uniform_3_op(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
-; GCN-LABEL: uniform_3_op:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_andn2_b32 s0, s2, s0
-; GCN-NEXT: s_and_b32 s0, s0, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: uniform_3_op:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_andn2_b32 s0, s2, s0
+; GFX950-NEXT: s_and_b32 s0, s0, s1
+; GFX950-NEXT: v_mov_b32_e32 v0, s0
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: uniform_3_op:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_and_not1_b32 s0, s2, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%and1 = and i32 %nota, %c
%and2 = and i32 %and1, %b
@@ -330,6 +438,21 @@ define amdgpu_ps float @uniform_4_op(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
; GFX950-GISEL-NEXT: s_andn2_b32 s0, s0, s1
; GFX950-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: uniform_4_op:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, s0, s1, v0 bitop3:2
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: uniform_4_op:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_and_not1_b32 s0, s2, s0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_and_not1_b32 s0, s0, s1
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%nota = xor i32 %a, -1
%notb = xor i32 %b, -1
%and1 = and i32 %nota, %c
@@ -341,10 +464,30 @@ define amdgpu_ps float @uniform_4_op(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
; ========= 16 bit tests =========
define amdgpu_ps half @not_and_not_and_not_and_b16(i16 %a, i16 %b, i16 %c) {
-; GCN-LABEL: not_and_not_and_not_and_b16:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:1
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: not_and_not_and_not_and_b16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:1
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: not_and_not_and_not_and_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:1
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: not_and_not_and_not_and_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:1
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-FAKE16-LABEL: not_and_not_and_not_and_b16:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:1
+; GFX1250-GISEL-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-TRUE16-LABEL: not_and_not_and_not_and_b16:
+; GFX1250-GISEL-TRUE16: ; %bb.0:
+; GFX1250-GISEL-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:1
+; GFX1250-GISEL-TRUE16-NEXT: ; return to shader part epilog
%nota = xor i16 %a, -1
%notb = xor i16 %b, -1
%notc = xor i16 %c, -1
@@ -355,10 +498,30 @@ define amdgpu_ps half @not_and_not_and_not_and_b16(i16 %a, i16 %b, i16 %c) {
}
define amdgpu_ps half @not_and_not_and_and_b16(i16 %a, i16 %b, i16 %c) {
-; GCN-LABEL: not_and_not_and_and_b16:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:2
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: not_and_not_and_and_b16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:2
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: not_and_not_and_and_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:2
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: not_and_not_and_and_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:2
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-FAKE16-LABEL: not_and_not_and_and_b16:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:2
+; GFX1250-GISEL-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-TRUE16-LABEL: not_and_not_and_and_b16:
+; GFX1250-GISEL-TRUE16: ; %bb.0:
+; GFX1250-GISEL-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:2
+; GFX1250-GISEL-TRUE16-NEXT: ; return to shader part epilog
%nota = xor i16 %a, -1
%notb = xor i16 %b, -1
%and1 = and i16 %nota, %c
@@ -368,10 +531,30 @@ define amdgpu_ps half @not_and_not_and_and_b16(i16 %a, i16 %b, i16 %c) {
}
define amdgpu_ps half @not_and_and_not_and_b16(i16 %a, i16 %b, i16 %c) {
-; GCN-LABEL: not_and_and_not_and_b16:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:4
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: not_and_and_not_and_b16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:4
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: not_and_and_not_and_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:4
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: not_and_and_not_and_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:4
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-FAKE16-LABEL: not_and_and_not_and_b16:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:4
+; GFX1250-GISEL-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-TRUE16-LABEL: not_and_and_not_and_b16:
+; GFX1250-GISEL-TRUE16: ; %bb.0:
+; GFX1250-GISEL-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:4
+; GFX1250-GISEL-TRUE16-NEXT: ; return to shader part epilog
%nota = xor i16 %a, -1
%notc = xor i16 %c, -1
%and1 = and i16 %nota, %notc
@@ -391,6 +574,21 @@ define amdgpu_ps half @test_xor3_b16(i16 %a, i16 %b, i16 %c) {
; GFX950-GISEL-NEXT: v_xor_b32_e32 v0, v0, v1
; GFX950-GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_xor3_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v2, v1 bitop3:0x96
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: test_xor3_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v2.l, v1.l bitop3:0x96
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_xor3_b16:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%xor1 = xor i16 %a, %b
%xor2 = xor i16 %xor1, %c
%ret_cast = bitcast i16 %xor2 to half
@@ -407,6 +605,21 @@ define amdgpu_ps half @test_or3_b16(i16 %a, i16 %b, i16 %c) {
; GFX950-GISEL: ; %bb.0:
; GFX950-GISEL-NEXT: v_or3_b32 v0, v0, v1, v2
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_or3_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v2, v1 bitop3:0xfe
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: test_or3_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v2.l, v1.l bitop3:0xfe
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_or3_b16:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%or1 = or i16 %a, %b
%or2 = or i16 %or1, %c
%ret_cast = bitcast i16 %or2 to half
@@ -423,10 +636,26 @@ define amdgpu_ps half @test_and_or_b16(i16 %a, i16 %b, i16 %c) {
; GFX950-GISEL: ; %bb.0:
; GFX950-GISEL-NEXT: v_and_or_b32 v0, v0, v1, v2
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_and_or_b16:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_bitop3_b16 v0, v0, v2, v1 bitop3:0xec
+; GFX1250-SDAG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-TRUE16-LABEL: test_and_or_b16:
+; GFX1250-SDAG-TRUE16: ; %bb.0:
+; GFX1250-SDAG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v2.l, v1.l bitop3:0xec
+; GFX1250-SDAG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: test_and_or_b16:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_and_or_b32 v0, v0, v1, v2
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%and1 = and i16 %a, %b
%or1 = or i16 %and1, %c
%ret_cast = bitcast i16 %or1 to half
ret half %ret_cast
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX950: {{.*}}
+; GFX1250-FAKE16: {{.*}}
+; GFX1250-TRUE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-inst-size-gfx11.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-inst-size-gfx11.ll
new file mode 100644
index 0000000..dd38937
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-inst-size-gfx11.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-s-branch-bits=4 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11 %s
+
+; Make sure the inst size estimates for D16 pseudo insts are not 0
+
+define amdgpu_kernel void @long_forward_branch_gfx11plus(ptr addrspace(1) %in, ptr addrspace(1) %out, i32 %cnd) #0 {
+; GFX11-LABEL: long_forward_branch_gfx11plus:
+; GFX11: ; %bb.0: ; %bb0
+; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x34
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB0_1
+; GFX11-NEXT: ; %bb.3: ; %bb0
+; GFX11-NEXT: s_getpc_b64 s[6:7]
+; GFX11-NEXT: .Lpost_getpc0:
+; GFX11-NEXT: s_add_u32 s6, s6, (.LBB0_2-.Lpost_getpc0)&4294967295
+; GFX11-NEXT: s_addc_u32 s7, s7, (.LBB0_2-.Lpost_getpc0)>>32
+; GFX11-NEXT: s_setpc_b64 s[6:7]
+; GFX11-NEXT: .LBB0_1: ; %bb2
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: global_load_d16_b16 v0, v1, s[0:1]
+; GFX11-NEXT: global_load_d16_hi_b16 v0, v1, s[0:1] offset:2
+; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: global_store_b16 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_d16_hi_b16 v1, v0, s[2:3] offset:2
+; GFX11-NEXT: .LBB0_2: ; %bb3
+; GFX11-NEXT: s_endpgm
+bb0:
+ ;%idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr inbounds i16, ptr addrspace(1) %in, i32 0
+ %gep1 = getelementptr inbounds i16, ptr addrspace(1) %in, i32 1
+ %out0 = getelementptr inbounds i16, ptr addrspace(1) %out, i32 0
+ %out1 = getelementptr inbounds i16, ptr addrspace(1) %out, i32 1
+ %cmp = icmp eq i32 %cnd, 0
+ br i1 %cmp, label %bb3, label %bb2 ; +9 dword branch
+bb2:
+ ; Estimated as 32 bytes on gfx11 (requiring a long branch)
+ %load0 = load i16, ptr addrspace(1) %gep0
+ %load1 = load i16, ptr addrspace(1) %gep1
+ store i16 %load0, ptr addrspace(1) %out0
+ store i16 %load1, ptr addrspace(1) %out1
+ br label %bb3
+bb3:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
new file mode 100644
index 0000000..fcbf7ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -show-mc-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+define i16 @cvt_pk_bf8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_bf8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_bf8_f16 v0, 0x38003800 ; encoding: [0x00,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x38]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0xH3800, half 0xH3800>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
+
+define i16 @cvt_pk_fp8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_fp8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_fp8_f16 v0, 0x3800 ; encoding: [0x00,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0xH3800, half 0xH0>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
diff --git a/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll b/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
index dcf5179..0b099cd 100644
--- a/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
@@ -442,9 +442,9 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use() #1 {
;
; GFX11-LABEL: add_x_shl_neg_to_sub_multi_use:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_dual_mov_b32 v1, 13 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: v_dual_mov_b32 v1, 13 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: v_sub_nc_u32_e32 v0, 0, v0
; GFX11-NEXT: ds_store_b32 v0, v1 offset:123
; GFX11-NEXT: ds_store_b32 v0, v1 offset:456
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 9ae9d19..210e09f 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1702,7 +1702,7 @@ entry:
%gep.r = getelementptr inbounds half, ptr addrspace(1) %r, i64 %tid.ext
%a.val = load volatile half, ptr addrspace(1) %gep.a
%b.val = load volatile half, ptr addrspace(1) %gep.b
- %r.val = fdiv half %a.val, %b.val
+ %r.val = fdiv afn half %a.val, %b.val
store half %r.val, ptr addrspace(1) %gep.r
ret void
}
@@ -2475,4 +2475,4 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half>) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index bd4ee03..2ff66c9 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -281,7 +281,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -322,7 +322,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -370,7 +370,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -414,7 +414,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -458,7 +458,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
-; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn:
@@ -491,7 +491,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -531,7 +531,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
-; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn_neg128:
@@ -567,7 +567,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -673,7 +673,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB18_5
@@ -715,7 +715,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB18_5
@@ -764,7 +764,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB19_5
@@ -809,7 +809,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB19_5
@@ -855,7 +855,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn:
@@ -891,7 +891,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -934,7 +934,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn_neg128:
@@ -973,7 +973,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -1079,7 +1079,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB26_5
@@ -1121,7 +1121,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB26_5
@@ -1170,7 +1170,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB27_5
@@ -1215,7 +1215,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB27_5
@@ -1261,7 +1261,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn:
@@ -1297,7 +1297,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -1340,7 +1340,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn_neg128:
@@ -1379,7 +1379,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -1486,7 +1486,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB34_5
@@ -1529,7 +1529,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_and_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB34_5
@@ -1579,7 +1579,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB35_5
@@ -1625,7 +1625,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_and_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB35_5
@@ -1672,7 +1672,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn:
@@ -1709,7 +1709,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -1753,7 +1753,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn_neg128:
@@ -1793,7 +1793,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -1900,7 +1900,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_or_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB42_5
@@ -1943,7 +1943,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_or_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB42_5
@@ -1993,7 +1993,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_or_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB43_5
@@ -2039,7 +2039,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_or_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB43_5
@@ -2086,7 +2086,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_or_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn:
@@ -2123,7 +2123,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_or_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -2167,7 +2167,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_or_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn_neg128:
@@ -2207,7 +2207,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_or_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -2314,7 +2314,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB50_5
@@ -2357,7 +2357,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB50_5
@@ -2407,7 +2407,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB51_5
@@ -2453,7 +2453,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v3, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB51_5
@@ -2500,7 +2500,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn:
@@ -2537,7 +2537,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -2581,7 +2581,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn_neg128:
@@ -2621,7 +2621,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -2721,9 +2721,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB58_5
@@ -2765,9 +2764,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB58_5
@@ -2816,9 +2814,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB59_5
@@ -2863,9 +2860,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB59_5
@@ -2909,9 +2905,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn:
@@ -2945,9 +2940,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -2988,9 +2982,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn_neg128:
@@ -3027,9 +3020,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -3129,9 +3121,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB66_5
@@ -3173,9 +3164,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB66_5
@@ -3224,9 +3214,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB67_5
@@ -3271,9 +3260,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB67_5
@@ -3317,9 +3305,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn:
@@ -3353,9 +3340,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -3396,9 +3382,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn_neg128:
@@ -3435,9 +3420,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -3537,9 +3521,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB74_5
@@ -3581,9 +3564,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB74_5
@@ -3632,9 +3614,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB75_5
@@ -3679,9 +3660,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB75_5
@@ -3725,9 +3705,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn:
@@ -3761,9 +3740,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -3804,9 +3782,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn_neg128:
@@ -3843,9 +3820,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -3945,9 +3921,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB82_5
@@ -3989,9 +3964,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB82_5
@@ -4040,9 +4014,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB83_5
@@ -4087,9 +4060,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB83_5
@@ -4133,9 +4105,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn:
@@ -4169,9 +4140,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -4212,9 +4182,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn_neg128:
@@ -4251,9 +4220,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -4377,7 +4345,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v1, v5 :: v_dual_cndmask_b32 v2, v0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB90_5
@@ -4423,7 +4391,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v0, v6 :: v_dual_cndmask_b32 v3, v1, v7
-; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB90_5
@@ -4477,7 +4445,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v1, v5 :: v_dual_cndmask_b32 v2, v0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB91_5
@@ -4526,7 +4494,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v0, v6 :: v_dual_cndmask_b32 v3, v1, v7
-; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB91_5
@@ -4577,7 +4545,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_cndmask_b32 v0, v0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn:
@@ -4617,7 +4585,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_cndmask_b32 v1, v1, v7
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -4664,7 +4632,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_cndmask_b32 v0, v0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128:
@@ -4707,7 +4675,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_cndmask_b32 v1, v1, v7
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -4808,7 +4776,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB98_5
@@ -4854,7 +4822,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB98_5
@@ -4906,7 +4874,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-SDAG-NEXT: s_branch .LBB99_5
@@ -4955,7 +4923,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-GISEL-NEXT: s_branch .LBB99_5
@@ -5002,7 +4970,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn:
@@ -5040,7 +5008,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -5084,7 +5052,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn_neg128:
@@ -5125,7 +5093,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -5229,7 +5197,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v5, v3 :: v_dual_cndmask_b32 v2, v4, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1250-SDAG-NEXT: s_branch .LBB106_5
@@ -5276,7 +5244,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1250-GISEL-NEXT: s_branch .LBB106_5
@@ -5330,7 +5298,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v5, v3 :: v_dual_cndmask_b32 v2, v4, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1250-SDAG-NEXT: s_branch .LBB107_5
@@ -5380,7 +5348,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1250-GISEL-NEXT: s_branch .LBB107_5
@@ -5429,7 +5397,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v3 :: v_dual_cndmask_b32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn:
@@ -5468,7 +5436,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
@@ -5514,7 +5482,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v3 :: v_dual_cndmask_b32 v0, v0, v2
-; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn_neg128:
@@ -5556,7 +5524,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
-; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index b5e579b..b25d9b2 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -714,10 +714,10 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX11-LABEL: store_load_vindex_kernel:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-NEXT: v_mov_b32_e32 v2, 15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_lshl_b32 s0, s0, 7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -732,9 +732,9 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX12-LABEL: store_load_vindex_kernel:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_lshl_b32 s0, s0, 7
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -769,8 +769,8 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX942-LABEL: store_load_vindex_kernel:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24
-; GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX942-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX942-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX942-NEXT: v_mov_b32_e32 v1, 15
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_lshl_b32 s0, s0, 7
@@ -809,10 +809,10 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX11-PAL-LABEL: store_load_vindex_kernel:
; GFX11-PAL: ; %bb.0: ; %bb
; GFX11-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX11-PAL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-PAL-NEXT: v_mov_b32_e32 v2, 15
; GFX11-PAL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-PAL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-PAL-NEXT: s_lshl_b32 s0, s0, 7
; GFX11-PAL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -827,9 +827,9 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX12-PAL-LABEL: store_load_vindex_kernel:
; GFX12-PAL: ; %bb.0: ; %bb
; GFX12-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-PAL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: s_lshl_b32 s0, s0, 7
; GFX12-PAL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
@@ -1958,10 +1958,10 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX11-LABEL: store_load_vindex_small_offset_kernel:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX11-NEXT: scratch_load_b32 v3, off, off glc dlc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: scratch_store_b32 v0, v1, off offset:384 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
@@ -1976,10 +1976,10 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX12-LABEL: store_load_vindex_small_offset_kernel:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v1, off offset:384 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
@@ -2021,8 +2021,8 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX942-NEXT: scratch_load_dword v1, off, off sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX942-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX942-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX942-NEXT: v_mov_b32_e32 v1, 15
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_lshl_b32 s0, s0, 7
@@ -2092,10 +2092,10 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX11-PAL-LABEL: store_load_vindex_small_offset_kernel:
; GFX11-PAL: ; %bb.0: ; %bb
; GFX11-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX11-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX11-PAL-NEXT: scratch_load_b32 v3, off, off glc dlc
; GFX11-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-PAL-NEXT: scratch_store_b32 v0, v1, off offset:384 dlc
; GFX11-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-PAL-NEXT: s_waitcnt lgkmcnt(0)
@@ -2110,10 +2110,10 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX12-PAL-LABEL: store_load_vindex_small_offset_kernel:
; GFX12-PAL: ; %bb.0: ; %bb
; GFX12-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-PAL-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_loadcnt 0x0
-; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 v0, v1, off offset:384 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
@@ -3254,10 +3254,10 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX11-LABEL: store_load_vindex_large_offset_kernel:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX11-NEXT: scratch_load_b32 v3, off, off offset:4 glc dlc
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_lshl_b32 s0, s0, 7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -3274,10 +3274,10 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX12-LABEL: store_load_vindex_large_offset_kernel:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v1, off offset:16512 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
@@ -3319,8 +3319,8 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX942-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX942-NEXT: scratch_load_dword v1, off, off offset:4 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX942-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX942-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX942-NEXT: v_mov_b32_e32 v1, 15
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_lshl_b32 s0, s0, 7
@@ -3391,10 +3391,10 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX11-PAL-LABEL: store_load_vindex_large_offset_kernel:
; GFX11-PAL: ; %bb.0: ; %bb
; GFX11-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX11-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX11-PAL-NEXT: scratch_load_b32 v3, off, off offset:4 glc dlc
; GFX11-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-PAL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-PAL-NEXT: s_lshl_b32 s0, s0, 7
; GFX11-PAL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -3411,10 +3411,10 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX12-PAL-LABEL: store_load_vindex_large_offset_kernel:
; GFX12-PAL: ; %bb.0: ; %bb
; GFX12-PAL-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-PAL-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_lshlrev_b32 v0, 2, v0
; GFX12-PAL-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_loadcnt 0x0
-; GFX12-PAL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-PAL-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX12-PAL-NEXT: s_wait_kmcnt 0x0
; GFX12-PAL-NEXT: scratch_store_b32 v0, v1, off offset:16512 scope:SCOPE_SYS
; GFX12-PAL-NEXT: s_wait_storecnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index 6067194..f9a24fe 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx90a -amdgpu-atomic-optimizer-strategy=None | FileCheck %s -check-prefix=GFX90A
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx942 -amdgpu-atomic-optimizer-strategy=None | FileCheck %s -check-prefix=GFX942
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-atomic-optimizer-strategy=None | FileCheck %s -check-prefix=GFX1250
declare double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32, i32 immarg)
declare double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double, ptr addrspace(8), i32, i32, i32, i32 immarg)
@@ -38,6 +39,17 @@ define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -57,6 +69,13 @@ define amdgpu_ps void @raw_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -93,6 +112,24 @@ define amdgpu_kernel void @raw_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -121,6 +158,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -140,6 +188,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_add_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -176,6 +231,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_add_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -204,6 +277,17 @@ define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -223,6 +307,13 @@ define amdgpu_ps void @struct_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -259,6 +350,23 @@ define amdgpu_kernel void @struct_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -287,6 +395,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -306,6 +425,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_add_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -342,6 +468,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_add_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -370,6 +513,17 @@ define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -389,6 +543,13 @@ define amdgpu_ps void @raw_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -425,6 +586,24 @@ define amdgpu_kernel void @raw_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -453,6 +632,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -472,6 +662,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_min_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -508,6 +705,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_min_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -536,6 +751,17 @@ define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -555,6 +781,13 @@ define amdgpu_ps void @struct_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -591,6 +824,23 @@ define amdgpu_kernel void @struct_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -619,6 +869,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -638,6 +899,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_min_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -674,6 +942,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_min_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_min_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_min_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -702,6 +987,17 @@ define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, doub
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -721,6 +1017,13 @@ define amdgpu_ps void @raw_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -757,6 +1060,24 @@ define amdgpu_kernel void @raw_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> %rsr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -785,6 +1106,17 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_noret_f64(ptr addrspace(8)
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
ret void
@@ -804,6 +1136,13 @@ define amdgpu_ps void @raw_ptr_buffer_atomic_max_rtn_f64(ptr addrspace(8) inreg
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0)
store double %ret, ptr poison
@@ -840,6 +1179,24 @@ define amdgpu_kernel void @raw_ptr_buffer_atomic_max_rtn_f64_off4_slc(ptr addrsp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s6, 4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], s6 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.raw.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -868,6 +1225,17 @@ define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> %rsrc, d
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -887,6 +1255,13 @@ define amdgpu_ps void @struct_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, d
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -923,6 +1298,23 @@ define amdgpu_kernel void @struct_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> %
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -951,6 +1343,17 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_noret_f64(ptr addrspace(
; GFX942-NEXT: v_mov_b32_e32 v2, s8
; GFX942-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_noret_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
ret void
@@ -970,6 +1373,13 @@ define amdgpu_ps void @struct_ptr_buffer_atomic_max_rtn_f64(ptr addrspace(8) inr
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_rtn_f64:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
store double %ret, ptr poison
@@ -1006,6 +1416,23 @@ define amdgpu_kernel void @struct_ptr_buffer_atomic_max_rtn_f64_off4_slc(ptr add
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: global_store_dwordx2 v0, v[2:3], s[8:9]
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_max_rtn_f64_off4_slc:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s10
+; GFX1250-NEXT: buffer_atomic_max_num_f64 v[0:1], v2, s[0:3], null idxen offset:4 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x44
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f64(double %data, ptr addrspace(8) %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
store double %ret, ptr addrspace(1) %out, align 8
@@ -1038,6 +1465,19 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1067,6 +1507,17 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1098,6 +1549,19 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1127,6 +1591,17 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_flush:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1154,6 +1629,19 @@ define double @global_atomic_fadd_f64_rtn_pat(ptr addrspace(1) %ptr, double %dat
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1179,6 +1667,18 @@ define double @global_atomic_fadd_f64_rtn_pat_agent(ptr addrspace(1) %ptr, doubl
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1206,6 +1706,19 @@ define double @global_atomic_fadd_f64_rtn_pat_system(ptr addrspace(1) %ptr, doub
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_rtn_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1246,6 +1759,17 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
ret void
@@ -1277,6 +1801,19 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1306,6 +1843,17 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1337,6 +1885,19 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_system(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1364,6 +1925,19 @@ define double @flat_atomic_fadd_f64_rtn_pat(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1389,6 +1963,18 @@ define double @flat_atomic_fadd_f64_rtn_pat_agent(ptr %ptr) #1 {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat_agent:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1418,6 +2004,19 @@ define double @flat_atomic_fadd_f64_rtn_pat_system(ptr %ptr) #1 {
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_rtn_pat_system:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1458,6 +2057,17 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent_safe(ptr %ptr) {
; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: flat_atomic_fadd_f64_noret_pat_agent_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f64 v2, v[0:1], s[0:1] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1
ret void
@@ -1485,6 +2095,31 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do
; GFX942-NEXT: ds_add_f64 v2, v[0:1]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v2, s2
+; GFX1250-NEXT: s_mov_b32 s2, 0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[4:5], s[0:1], v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1250-NEXT: s_cbranch_execnz .LBB51_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
ret void
@@ -1508,6 +2143,30 @@ define double @local_atomic_fadd_f64_rtn(ptr addrspace(3) %ptr, double %data) {
; GFX942-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_rtn:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
+; GFX1250-NEXT: v_mov_b32_e32 v4, v1
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB52_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
ret double %ret
@@ -1534,6 +2193,29 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX942-NEXT: ds_add_f64 v2, v[0:1]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB53_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1560,6 +2242,29 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX942-NEXT: ds_add_f64 v2, v[0:1]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
@@ -1586,6 +2291,29 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX942-NEXT: ds_add_f64 v2, v[0:1]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_endpgm
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB55_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
ret void
@@ -1608,6 +2336,29 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX942-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_rtn_pat:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, v0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB56_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
ret double %ret
@@ -1631,6 +2382,30 @@ define double @local_atomic_fadd_f64_rtn_ieee_unsafe(ptr addrspace(3) %ptr, doub
; GFX942-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_rtn_ieee_unsafe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
+; GFX1250-NEXT: v_mov_b32_e32 v4, v1
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB57_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
ret double %ret
@@ -1654,6 +2429,30 @@ define double @local_atomic_fadd_f64_rtn_ieee_safe(ptr addrspace(3) %ptr, double
; GFX942-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: local_atomic_fadd_f64_rtn_ieee_safe:
+; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
+; GFX1250-NEXT: v_mov_b32_e32 v4, v1
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
+; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB58_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
ret double %ret
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 57b4857..c52fb61 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -11,6 +11,10 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SDAG-FAKE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-TRUE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-FAKE16 %s
define amdgpu_kernel void @fptrunc_f32_to_f16(
; SI-SDAG-LABEL: fptrunc_f32_to_f16:
@@ -192,6 +196,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -381,6 +418,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_afn(ptr addrspace(1) %r,
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %a) {
entry:
%a.val = load float, ptr addrspace(1) %a
@@ -1089,6 +1159,130 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s7, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s9, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s2, s7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s2, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s2, s2, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s7, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s2, s2, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 0x7c00, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1101,62 +1295,21 @@ entry:
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; SI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v1
-; SI-SDAG-NEXT: s_and_b32 s6, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s7, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s8, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; SI-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s6, s6, s7
-; SI-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; SI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; SI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s6, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; SI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s7
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s6, s1, s0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1174,62 +1327,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v1
-; VI-SDAG-NEXT: s_and_b32 s5, s4, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s5, v0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s8, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_and_b32 s5, s7, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s5, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; VI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; VI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; VI-SDAG-NEXT: s_or_b32 s9, s5, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; VI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s5, 0
-; VI-SDAG-NEXT: s_cselect_b32 s5, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s7
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s5
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1247,62 +1359,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX9-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX9-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX9-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1320,62 +1391,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX950-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX950-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX950-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX950-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1401,60 +1431,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -1468,60 +1451,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -1552,6 +1488,40 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1769,6 +1739,38 @@ define amdgpu_kernel void @fptrunc_v2f32_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3014,6 +3016,225 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s9, 0x3f1, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s5, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s5, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s2, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s8, 1, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s8, s8, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s9, s2, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s8, s8, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s4, s4, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s11, s10, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s4, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s8, s10
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s2, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s2, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s2, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s10, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s6, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s4, s7, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3026,106 +3247,25 @@ entry:
define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; SI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v3
-; SI-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; SI-SDAG-NEXT: s_and_b32 s7, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s8, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s9, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; SI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; SI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; SI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; SI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; SI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; SI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; SI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s7, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; SI-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; SI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; SI-SDAG-NEXT: s_or_b32 s1, s1, s7
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; SI-SDAG-NEXT: s_lshl_b32 s1, s1, 16
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s8
-; SI-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; SI-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s0, s6, s0
-; SI-SDAG-NEXT: s_and_b32 s0, s0, 0xffff
-; SI-SDAG-NEXT: s_or_b32 s6, s0, s1
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; SI-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3147,106 +3287,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; VI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v3
-; VI-SDAG-NEXT: s_and_b32 s7, s4, 0x1ff
-; VI-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; VI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; VI-SDAG-NEXT: s_lshr_b32 s8, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s9, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; VI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; VI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; VI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s7, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; VI-SDAG-NEXT: s_and_b32 s8, s5, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_lshr_b32 s9, s5, 8
-; VI-SDAG-NEXT: s_bfe_u32 s10, s5, 0xb0014
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; VI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; VI-SDAG-NEXT: s_lshl_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, s8
-; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s6
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3267,104 +3325,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX9-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX9-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX9-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX9-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX9-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX9-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX9-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3385,104 +3363,22 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX950-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX950-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX950-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX950-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX950-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX950-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX950-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX950-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3511,109 +3407,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v1, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_pack_b32_f16 v0, v0.h, v0.l
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -3627,109 +3431,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -3768,6 +3480,46 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v1, s[6:7]
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3957,6 +3709,42 @@ define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4147,6 +3935,42 @@ define amdgpu_kernel void @fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4337,6 +4161,42 @@ define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset1_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4536,6 +4396,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4735,6 +4631,45 @@ define amdgpu_kernel void @fptrunc_fabs_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4943,6 +4878,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_sext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 4f8eab1..5d31177 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -226,59 +226,59 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: s_mov_b32 s0, s4
+; VI-SDAG-NEXT: s_lshr_b32 s4, s7, 8
+; VI-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
+; VI-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
+; VI-SDAG-NEXT: s_or_b32 s4, s4, s6
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_mov_b32 s1, s5
+; VI-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v0
+; VI-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
+; VI-SDAG-NEXT: s_or_b32 s4, s8, s4
+; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
+; VI-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
+; VI-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v0
+; VI-SDAG-NEXT: s_lshr_b32 s9, s5, s8
+; VI-SDAG-NEXT: s_lshl_b32 s8, s9, s8
+; VI-SDAG-NEXT: s_cmp_lg_u32 s8, s5
+; VI-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; VI-SDAG-NEXT: s_addk_i32 s6, 0xfc10
+; VI-SDAG-NEXT: s_lshl_b32 s8, s6, 12
+; VI-SDAG-NEXT: s_or_b32 s5, s9, s5
+; VI-SDAG-NEXT: s_or_b32 s8, s4, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 1
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s8
+; VI-SDAG-NEXT: s_and_b32 s8, s5, 7
+; VI-SDAG-NEXT: s_cmp_gt_i32 s8, 5
+; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
+; VI-SDAG-NEXT: s_cmp_eq_u32 s8, 3
+; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
+; VI-SDAG-NEXT: s_or_b32 s8, s8, s9
+; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; VI-SDAG-NEXT: s_add_i32 s5, s5, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 31
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
+; VI-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, s5
+; VI-SDAG-NEXT: s_lshr_b32 s5, s7, 16
+; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
+; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
+; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16:
; VI-GISEL: ; %bb.0:
@@ -331,68 +331,57 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX10-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX10-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX10-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX10-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX10-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX10-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX10-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX10-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX10-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX10-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX10-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX10-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX10-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX10-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX10-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX10-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX10-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX10-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX10-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX10-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX10-GISEL: ; %bb.0:
@@ -445,76 +434,65 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX11-SAFE-SDAG: ; %bb.0:
-; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-SAFE-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX11-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX11-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX11-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX11-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX11-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX11-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX11-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX11-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX11-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX11-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX11-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX11-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX11-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX11-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX11-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX11-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
+; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX11-GISEL: ; %bb.0:
@@ -570,30 +548,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-GISEL-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-TRUE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-TRUE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-FAKE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-FAKE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_endpgm
%result = fptrunc double %in to half
%result_i16 = bitcast half %result to i16
store i16 %result_i16, ptr addrspace(1) %out
@@ -603,111 +557,27 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double %in) {
; SI-LABEL: fptrunc_f64_to_f16_afn:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_movk_i32 s2, 0x7e00
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s0, s7, 8
-; SI-NEXT: s_and_b32 s1, s7, 0x1ff
-; SI-NEXT: s_and_b32 s8, s0, 0xffe
-; SI-NEXT: s_or_b32 s0, s1, s6
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: s_bfe_u32 s0, s7, 0xb0014
-; SI-NEXT: v_readfirstlane_b32 s1, v0
-; SI-NEXT: s_sub_i32 s6, 0x3f1, s0
-; SI-NEXT: s_or_b32 s1, s8, s1
-; SI-NEXT: v_med3_i32 v0, s6, 0, 13
-; SI-NEXT: s_or_b32 s6, s1, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_lshr_b32 s9, s6, s8
-; SI-NEXT: s_lshl_b32 s8, s9, s8
-; SI-NEXT: s_cmp_lg_u32 s8, s6
-; SI-NEXT: s_cselect_b32 s6, 1, 0
-; SI-NEXT: s_addk_i32 s0, 0xfc10
-; SI-NEXT: s_or_b32 s6, s9, s6
-; SI-NEXT: s_lshl_b32 s8, s0, 12
-; SI-NEXT: s_or_b32 s8, s1, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 1
-; SI-NEXT: s_cselect_b32 s6, s6, s8
-; SI-NEXT: s_and_b32 s8, s6, 7
-; SI-NEXT: s_cmp_gt_i32 s8, 5
-; SI-NEXT: s_cselect_b32 s9, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s8, 3
-; SI-NEXT: s_cselect_b32 s8, 1, 0
-; SI-NEXT: s_lshr_b32 s6, s6, 2
-; SI-NEXT: s_or_b32 s8, s8, s9
-; SI-NEXT: s_add_i32 s6, s6, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 31
-; SI-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s1, 0
-; SI-NEXT: s_cselect_b32 s1, s2, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s0, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s1, s6
-; SI-NEXT: s_lshr_b32 s1, s7, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s6, s1, s0
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; VI-GISEL: ; %bb.0:
@@ -720,68 +590,16 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; GFX10-GISEL: ; %bb.0:
@@ -794,74 +612,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX11-SAFE-SDAG: ; %bb.0:
; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-SAFE-SDAG-NEXT: s_endpgm
;
@@ -1833,4 +1592,8 @@ define amdgpu_kernel void @fptrunc_v8f64_to_v8f32_afn(ptr addrspace(1) %out, <8
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10-SAFE-GISEL: {{.*}}
+; GFX10-SAFE-SDAG: {{.*}}
+; GFX10-UNSAFE-SDAG: {{.*}}
; VI-SAFE-GISEL: {{.*}}
+; VI-SAFE-SDAG: {{.*}}
+; VI-UNSAFE-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/freeze.ll b/llvm/test/CodeGen/AMDGPU/freeze.ll
index 9a347d7..ac4f0df 100644
--- a/llvm/test/CodeGen/AMDGPU/freeze.ll
+++ b/llvm/test/CodeGen/AMDGPU/freeze.ll
@@ -11532,15 +11532,13 @@ define void @freeze_v8p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
; GFX11-GISEL-LABEL: freeze_v8p5:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v6, 16, v0
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: scratch_load_b128 v[2:5], v0, off
-; GFX11-GISEL-NEXT: scratch_load_b128 v[6:9], v6, off
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v0, 16, v1
+; GFX11-GISEL-NEXT: scratch_load_b128 v[6:9], v0, off offset:16
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(1)
; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[2:5], off
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-GISEL-NEXT: scratch_store_b128 v0, v[6:9], off
+; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[6:9], off offset:16
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
%a = load <8 x ptr addrspace(5)>, ptr addrspace(5) %ptra
%freeze = freeze <8 x ptr addrspace(5)> %a
@@ -12072,25 +12070,19 @@ define void @freeze_v16p5(ptr addrspace(5) %ptra, ptr addrspace(5) %ptrb) {
; GFX11-GISEL-LABEL: freeze_v16p5:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v6, 16, v0
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v10, 32, v0
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v14, 48, v0
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v18, 32, v1
; GFX11-GISEL-NEXT: s_clause 0x3
; GFX11-GISEL-NEXT: scratch_load_b128 v[2:5], v0, off
-; GFX11-GISEL-NEXT: scratch_load_b128 v[6:9], v6, off
-; GFX11-GISEL-NEXT: scratch_load_b128 v[10:13], v10, off
-; GFX11-GISEL-NEXT: scratch_load_b128 v[14:17], v14, off
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v0, 16, v1
-; GFX11-GISEL-NEXT: v_add_nc_u32_e32 v19, 48, v1
+; GFX11-GISEL-NEXT: scratch_load_b128 v[6:9], v0, off offset:16
+; GFX11-GISEL-NEXT: scratch_load_b128 v[10:13], v0, off offset:32
+; GFX11-GISEL-NEXT: scratch_load_b128 v[14:17], v0, off offset:48
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(3)
; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[2:5], off
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(2)
-; GFX11-GISEL-NEXT: scratch_store_b128 v0, v[6:9], off
+; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[6:9], off offset:16
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(1)
-; GFX11-GISEL-NEXT: scratch_store_b128 v18, v[10:13], off
+; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[10:13], off offset:32
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-GISEL-NEXT: scratch_store_b128 v19, v[14:17], off
+; GFX11-GISEL-NEXT: scratch_store_b128 v1, v[14:17], off offset:48
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
%a = load <16 x ptr addrspace(5)>, ptr addrspace(5) %ptra
%freeze = freeze <16 x ptr addrspace(5)> %a
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 87c7cce..f81950b 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -1294,13 +1294,13 @@ define float @v_sqrt_f32__enough_unsafe_attrs(float %x) #3 {
ret float %result
}
-define float @v_sqrt_f32__unsafe_attr(float %x) #4 {
+define float @v_sqrt_f32__unsafe_attr(float %x) {
; GCN-LABEL: v_sqrt_f32__unsafe_attr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sqrt_f32_e32 v0, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = call nsz float @llvm.sqrt.f32(float %x)
+ %result = call afn nsz float @llvm.sqrt.f32(float %x)
ret float %result
}
@@ -4763,7 +4763,6 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #2 = { "approx-func-fp-math"="true" }
attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
attributes #5 = { "no-infs-fp-math"="true" }
!0 = !{float 0.5}
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir
new file mode 100644
index 0000000..675a1c9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir
@@ -0,0 +1,133 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GCN %s
+
+
+# Expected vmcnt(0) since the direct load is the only load.
+---
+name: dma_then_fence
+body: |
+ bb.0:
+ ; GCN-LABEL: name: dma_then_fence
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: $m0 = S_MOV_B32 0
+ ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) poison` + 4, addrspace 3)
+ ; GCN-NEXT: S_WAITCNT 3952
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ $m0 = S_MOV_B32 0
+ BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+ S_WAITCNT_lds_direct
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
+
+# Expected vmcnt(1) since the global load is not processed by SIInsertWaitcnts.
+
+---
+name: dma_then_global_load
+body: |
+ bb.0:
+ ; GCN-LABEL: name: dma_then_global_load
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: $m0 = S_MOV_B32 0
+ ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) poison` + 4, addrspace 3)
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ ; GCN-NEXT: S_WAITCNT 3953
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ $m0 = S_MOV_B32 0
+ BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ S_WAITCNT_lds_direct
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
+
+# Expected no vmcnt since there is no direct load to LDS, and the global load is not processed by SIInsertWaitcnts.
+
+---
+name: no_dma_just_fence
+body: |
+ bb.0:
+ ; GCN-LABEL: name: no_dma_just_fence
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ S_WAITCNT_lds_direct
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
+
+# Expected vmcnt(1) since the global load is not processed by SIInsertWaitcnts.
+
+---
+name: dma_then_system_fence
+body: |
+ bb.0:
+ ; GCN-LABEL: name: dma_then_system_fence
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) poison` + 4, addrspace 3)
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ ; GCN-NEXT: S_WAITCNT 3953
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ S_WAITCNT_lds_direct
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
+
+# The computed vmcnt(1) gets merged with the existing vmcnt(0).
+
+---
+name: merge_with_prev_wait
+body: |
+ bb.0:
+ ; GCN-LABEL: name: merge_with_prev_wait
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: $m0 = S_MOV_B32 0
+ ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) poison` + 4, addrspace 3)
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ ; GCN-NEXT: S_WAITCNT 3952
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ $m0 = S_MOV_B32 0
+ BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ S_WAITCNT 3952
+ S_WAITCNT_lds_direct
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
+
+# The computed vmcnt(1) gets merged with the existing vmcnt(0).
+
+---
+name: merge_with_next_wait
+body: |
+ bb.0:
+ ; GCN-LABEL: name: merge_with_next_wait
+ ; GCN: S_WAITCNT 0
+ ; GCN-NEXT: $m0 = S_MOV_B32 0
+ ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) poison` + 4, addrspace 3)
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ ; GCN-NEXT: S_WAITCNT 3952
+ ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ ; GCN-NEXT: S_ENDPGM 0
+ $m0 = S_MOV_B32 0
+ BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
+ S_WAITCNT_lds_direct
+ S_WAITCNT 3952
+ $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 546144da..742d87f 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -27,6 +27,9 @@
; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-TRUE16 %s
; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-FAKE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
; Test for integer mad formation for patterns used in clpeak
define i32 @clpeak_imad_pat_i32(i32 %x, i32 %y) {
@@ -221,6 +224,38 @@ define i32 @clpeak_imad_pat_i32(i32 %x, i32 %y) {
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i32:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i32:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add i32 %x, 1
%add = mul i32 %y18, %y
@@ -459,6 +494,37 @@ define signext i16 @clpeak_imad_pat_i16(i16 signext %x, i16 signext %y) {
; GFX1200-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v2, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v2, v3, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%conv33 = add i16 %x, 1
%add = mul i16 %conv33, %y
@@ -652,6 +718,21 @@ define <2 x i16> @clpeak_imad_pat_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v3, v0
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: clpeak_imad_pat_v2i16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v0, v1, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v3, v2, v1
+; GFX1250-NEXT: v_pk_mad_u16 v1, v2, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v3, v0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <2 x i16> %x, <i16 1, i16 1>
%add = mul <2 x i16> %y18, %y
@@ -998,6 +1079,54 @@ define <3 x i16> @clpeak_imad_pat_v3i16(<3 x i16> %x, <3 x i16> %y) {
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v3i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v1, v1, 1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v1, v1, v3, 1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v3, v5, v3, 1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v3i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v1, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v1, v1, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v3, v5, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y48 = add <3 x i16> %x, <i16 1, i16 1, i16 1>
%add = mul <3 x i16> %y48, %y
@@ -1429,6 +1558,54 @@ define <4 x i16> @clpeak_imad_pat_v4i16(<4 x i16> %x, <4 x i16> %y) {
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v4i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v1, v1, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v3, v5, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v4i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v1, v1, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v3, v5, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <4 x i16> %x, <i16 1, i16 1, i16 1, i16 1>
%add = mul <4 x i16> %y18, %y
@@ -1662,6 +1839,37 @@ define zeroext i16 @clpeak_umad_pat_i16(i16 zeroext %x, i16 zeroext %y) {
; GFX1200-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_umad_pat_i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_umad_pat_i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v2, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v2, v3, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%conv33 = add i16 %x, 1
%add = mul i16 %conv33, %y
@@ -1855,6 +2063,21 @@ define <2 x i16> @clpeak_umad_pat_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v3, v0
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: clpeak_umad_pat_v2i16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v0, v1, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v3, v2, v1
+; GFX1250-NEXT: v_pk_mad_u16 v1, v2, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v3, v0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <2 x i16> %x, <i16 1, i16 1>
%add = mul <2 x i16> %y18, %y
@@ -2201,6 +2424,54 @@ define <3 x i16> @clpeak_umad_pat_v3i16(<3 x i16> %x, <3 x i16> %y) {
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_umad_pat_v3i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v1, v1, 1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v1, v1, v3, 1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v3, v5, v3, 1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_umad_pat_v3i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v1, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v1, v1, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v3, v5, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y48 = add <3 x i16> %x, <i16 1, i16 1, i16 1>
%add = mul <3 x i16> %y48, %y
@@ -2632,6 +2903,54 @@ define <4 x i16> @clpeak_umad_pat_v4i16(<4 x i16> %x, <4 x i16> %y) {
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX1200-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_umad_pat_v4i16:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v1, v1, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v3, v5, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-SDAG-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_umad_pat_v4i16:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v4, v0, v2, v0
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v5, v1, v3, v1
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v0, v0, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v1, v1, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v6, v4, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v7, v5, v3
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v2, v4, v2, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: v_pk_mad_u16 v3, v5, v3, 1 op_sel_hi:[1,1,0]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v7, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX1250-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <4 x i16> %x, <i16 1, i16 1, i16 1, i16 1>
%add = mul <4 x i16> %y18, %y
@@ -2947,6 +3266,50 @@ define <2 x i32> @clpeak_imad_pat_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i32:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, v4, v0 :: v_dual_add_nc_u32 v1, v5, v1
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_mad_u32 v2, v0, v4, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v1, v5, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v2, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v3, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v2i32:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, v4, v0 :: v_dual_add_nc_u32 v1, v5, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v3, v1, v3
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <2 x i32> %x, <i32 1, i32 1>
%add = mul <2 x i32> %y18, %y
@@ -3376,6 +3739,73 @@ define <3 x i32> @clpeak_imad_pat_v3i32(<3 x i32> %x, <3 x i32> %y) {
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v5, v2
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v3i32:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v6, v0, v3
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v7, v1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v8, v2, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, v7, v1
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v2, v8, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v0, v6, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v2, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mad_u32 v4, v1, v7, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v3, v0, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mad_u32 v5, v2, v8, v2
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v4, v1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v2, v5, v2, v5
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v3i32:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v6, v0, v3
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v2, 1, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v7, v1, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v8, v2, v5
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, v7, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v3, 1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v2, v8, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v4, 1, v7
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v2, v5
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v5, 1, v8
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v1, v4
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v2, v5
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v2, 1, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v3, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v4, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v5, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y48 = add <3 x i32> %x, <i32 1, i32 1, i32 1>
%add = mul <3 x i32> %y48, %y
@@ -3874,6 +4304,80 @@ define <4 x i32> @clpeak_imad_pat_v4i32(<4 x i32> %x, <4 x i32> %y) {
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v6, v3
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v4i32:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, 1, v2 :: v_dual_add_nc_u32 v3, 1, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v8, v0, v4
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v9, v1, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v10, v2, v6
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v11, v3, v7
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, v8, v0 :: v_dual_add_nc_u32 v1, v9, v1
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, v10, v2 :: v_dual_add_nc_u32 v3, v11, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mad_u32 v4, v0, v8, v0
+; GFX1250-SDAG-NEXT: v_mad_u32 v5, v1, v9, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mad_u32 v6, v2, v10, v2
+; GFX1250-SDAG-NEXT: v_mad_u32 v7, v3, v11, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v4, v0, v4
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v5, v1, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-SDAG-NEXT: v_mad_u32 v2, v6, v2, v6
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v7, v3, v7
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v4i32:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v2 :: v_dual_add_nc_u32 v3, 1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v8, v0, v4
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v9, v1, v5
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v10, v2, v6
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v11, v3, v7
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, v8, v0 :: v_dual_add_nc_u32 v1, v9, v1
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, v10, v2 :: v_dual_add_nc_u32 v3, v11, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v4, 1, v8 :: v_dual_add_nc_u32 v5, 1, v9
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v6, 1, v10 :: v_dual_add_nc_u32 v7, 1, v11
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v4
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v5
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v6, v2, v6
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v7, v3, v7
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v2 :: v_dual_add_nc_u32 v3, 1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v4, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v5, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v6, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v3, v7, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
%add = mul <4 x i32> %y18, %y
@@ -4106,6 +4610,42 @@ define i32 @clpeak_imad_pat_i24(i32 %x, i32 %y) {
; GFX1200-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i24:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GFX1250-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 24
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v1, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i24:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GFX1250-GISEL-NEXT: v_bfe_i32 v1, v1, 0, 24
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%shl = shl i32 %x, 8
%shr = ashr exact i32 %shl, 8
@@ -4342,6 +4882,42 @@ define i32 @clpeak_imad_pat_u24(i32 %x, i32 %y) {
; GFX1200-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_u24:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, 0xffffff, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v1, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_u24:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, 0xffffff, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%shl = and i32 %x, 16777215
%shl1 = and i32 %y, 16777215
@@ -4582,6 +5158,37 @@ define signext i8 @clpeak_imad_pat_i8(i8 signext %x, i8 signext %y) {
; GFX1200-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i8:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i8:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v2, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v2, v3, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%conv33 = add i8 %x, 1
%add = mul i8 %conv33, %y
@@ -5001,6 +5608,56 @@ define <2 x i8> @clpeak_imad_pat_v2i8(<2 x i8> %x, <2 x i8> %y) {
; GFX1200-GISEL-FAKE16-NEXT: v_mul_lo_u16 v0, v0, v2
; GFX1200-GISEL-FAKE16-NEXT: v_mul_lo_u16 v1, v1, v3
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i8:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u16 v1, v1, 1
+; GFX1250-SDAG-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mad_u16 v4, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_mad_u16 v5, v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_mul_lo_u16 v3, v4, v3
+; GFX1250-SDAG-NEXT: v_mul_lo_u16 v2, v5, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v3, v1, v3
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v2, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v1, v3, v1
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_lshlrev_b16 v2, 8, v1
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX1250-SDAG-NEXT: v_bitop3_b16 v0, v0, v2, 0xff bitop3:0xec
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v2i8:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v1, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mad_u16 v4, v0, v2, v0
+; GFX1250-GISEL-NEXT: v_mad_u16 v5, v1, v3, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v2, 1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v1, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v6, v4, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v7, v5, v3
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v4, v2, 1
+; GFX1250-GISEL-NEXT: v_mad_u16 v3, v5, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v7, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <2 x i8> %x, <i8 1, i8 1>
%add = mul <2 x i8> %y18, %y
@@ -5508,6 +6165,44 @@ define i64 @clpeak_imad_pat_i64(i64 %x, i64 %y) {
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v8, v[2:3]
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i64:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[4:5], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[4:5], v[0:1]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[6:7], v2, v4, v[2:3]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v3, v4, v7
+; GFX1250-SDAG-NEXT: v_mad_u32 v7, v2, v5, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[0:1], v6, v2, v[6:7]
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v7, v2, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v6, v3, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i64:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[4:5], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[4:5], v[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[4:5]
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[0:1], v[2:3], v[0:1]
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add i64 %x, 1
%add = mul i64 %y18, %y
@@ -6416,6 +7111,68 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v15, v14
; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v14, v[7:8]
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i64:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[2:3]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[8:9], v[0:1], v[4:5]
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[10:11], v[2:3], v[6:7]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[8:9], v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[10:11], v[2:3]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[4:5], v[0:1], v[4:5]
+; GFX1250-SDAG-NEXT: v_mul_u64_e32 v[6:7], v[2:3], v[6:7]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[12:13], v4, v8, v[4:5]
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[14:15], v6, v10, v[6:7]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v5, v8, v13
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v7, v10, v15
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v13, v4, v9, v0
+; GFX1250-SDAG-NEXT: v_mad_u32 v15, v6, v11, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[0:1], v12, v4, v[12:13]
+; GFX1250-SDAG-NEXT: v_mad_nc_u64_u32 v[2:3], v14, v6, v[14:15]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v13, v4, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v15, v6, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v12, v5, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v14, v7, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v2i64:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[8:9], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[10:11], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[8:9], v[0:1]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[10:11], v[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[2:3], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[8:9]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[6:7], 1, v[10:11]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[4:5], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[6:7], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], 1, v[0:1]
+; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[0:1], v[4:5], v[0:1]
+; GFX1250-GISEL-NEXT: v_mul_u64_e32 v[2:3], v[6:7], v[2:3]
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y18 = add <2 x i64> %x, <i64 1, i64 1>
%add = mul <2 x i64> %y18, %y
@@ -6673,6 +7430,50 @@ define i32 @v_multi_use_mul_chain_add_other_use_all(i32 %arg, i32 %arg1, i32 %ar
; GFX1200-NEXT: s_wait_storecnt 0x0
; GFX1200-NEXT: v_add_nc_u32_e32 v0, v5, v0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: v_multi_use_mul_chain_add_other_use_all:
+; GFX1250-SDAG: ; %bb.0: ; %bb
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_add_nc_u32 v0, 1, v0
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v3, v1, v0
+; GFX1250-SDAG-NEXT: global_store_b32 v[4:5], v2, off scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: global_store_b32 v[4:5], v1, off scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: global_store_b32 v[4:5], v3, off scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v3, v0
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: v_multi_use_mul_chain_add_other_use_all:
+; GFX1250-GISEL: ; %bb.0: ; %bb
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_mov_b32 v2, v3
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v4, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v4
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v0
+; GFX1250-GISEL-NEXT: global_store_b32 v[2:3], v4, off scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: global_store_b32 v[2:3], v1, off scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: global_store_b32 v[2:3], v5, off scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v5, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
bb:
%i = add i32 %arg, 1
%i3 = mul i32 %i, %arg1
@@ -6906,6 +7707,46 @@ define i32 @v_multi_use_mul_chain_add_other_use_some(i32 %arg, i32 %arg1, i32 %a
; GFX1200-NEXT: s_wait_storecnt 0x0
; GFX1200-NEXT: v_add_nc_u32_e32 v0, v5, v1
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: v_multi_use_mul_chain_add_other_use_some:
+; GFX1250-SDAG: ; %bb.0: ; %bb
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_add_nc_u32 v0, 1, v0
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v3, v0, v1
+; GFX1250-SDAG-NEXT: global_store_b32 v[4:5], v2, off scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: global_store_b32 v[4:5], v3, off scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v3, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: v_multi_use_mul_chain_add_other_use_some:
+; GFX1250-GISEL: ; %bb.0: ; %bb
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_mov_b32 v2, v3
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v4, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v4
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v0, v1
+; GFX1250-GISEL-NEXT: global_store_b32 v[2:3], v4, off scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: global_store_b32 v[2:3], v5, off scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v5, v1
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
bb:
%i = add i32 %arg, 1
%i3 = mul i32 %i, %arg1
@@ -7235,6 +8076,60 @@ define i32 @clpeak_imad_pat_i32_x2(i32 %x, i32 %y) {
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i32_x2:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, v2, v1
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-SDAG-NEXT: v_add_nc_u32_e32 v1, v2, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i32_x2:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, v2, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, v2, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v1, 1, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y38 = add i32 %x, 1
%add = mul i32 %y38, %y
@@ -7806,6 +8701,84 @@ define <2 x i32> @clpeak_imad_pat_v2i32_x2(<2 x i32> %x, <2 x i32> %y) {
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i32_x2:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v0, v4, v0 :: v_dual_add_nc_u32 v1, v5, v1
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, v4, v2 :: v_dual_add_nc_u32 v3, v5, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_add_nc_u32 v2, v4, v2 :: v_dual_add_nc_u32 v3, v5, v3
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v2, v0, v4, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v3, v1, v5, v1
+; GFX1250-SDAG-NEXT: v_mad_u32 v0, v2, v0, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_mad_u32 v1, v3, v1, v3
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_v2i32_x2:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, v4, v0 :: v_dual_add_nc_u32 v1, v5, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, v4, v2 :: v_dual_add_nc_u32 v3, v5, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v4, v0, v2
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v5, v1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, v4, v2 :: v_dual_add_nc_u32 v3, v5, v3
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v2, 1, v4 :: v_dual_add_nc_u32 v3, 1, v5
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v2, v0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v3, v1, v3
+; GFX1250-GISEL-NEXT: v_dual_add_nc_u32 v0, 1, v0 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%y38 = add <2 x i32> %x, <i32 1, i32 1>
%add = mul <2 x i32> %y38, %y
@@ -8168,6 +9141,53 @@ define signext i16 @clpeak_imad_pat_i16_x2(i16 signext %x, i16 signext %y) {
; GFX1200-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_imad_pat_i16_x2:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_imad_pat_i16_x2:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v2, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v2, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v2, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v2, v3, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%conv69 = add i16 %x, 1
%add = mul i16 %conv69, %y
@@ -8525,6 +9545,53 @@ define zeroext i16 @clpeak_umad_pat_i16_x2(i16 zeroext %x, i16 zeroext %y) {
; GFX1200-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: clpeak_umad_pat_i16_x2:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v1, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v1, v0, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: clpeak_umad_pat_i16_x2:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v0, v0, 1
+; GFX1250-GISEL-NEXT: v_add_nc_u16 v2, v1, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v2, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v2, v3, 1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v3, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v1, v2, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: v_mad_u16 v1, v2, v3, 1
+; GFX1250-GISEL-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%conv69 = add i16 %x, 1
%add = mul i16 %conv69, %y
@@ -8842,6 +9909,29 @@ define <2 x i16> @clpeak_imad_pat_v2i16_x2(<2 x i16> %x, <2 x i16> %y) {
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v3, v0
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: clpeak_imad_pat_v2i16_x2:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v0, v1, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v1, v2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v1, v0, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v1, v0, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v1, v2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v1, v0, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v1, v0, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v3, v2, v1
+; GFX1250-NEXT: v_pk_mad_u16 v1, v2, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v3, v0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%y38 = add <2 x i16> %x, <i16 1, i16 1>
%add = mul <2 x i16> %y38, %y
@@ -9159,6 +10249,29 @@ define <2 x i16> @clpeak_umad_pat_v2i16_x2(<2 x i16> %x, <2 x i16> %y) {
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v3, v0
; GFX1200-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: clpeak_umad_pat_v2i16_x2:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v0, v0, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v0, v1, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v1, v2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v1, v0, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v1, v0, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v1, v2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_pk_mad_u16 v2, v1, v0, v0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v1, v0, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: v_pk_mul_lo_u16 v3, v2, v1
+; GFX1250-NEXT: v_pk_mad_u16 v1, v2, v1, 1 op_sel_hi:[1,1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v3, v0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%y38 = add <2 x i16> %x, <i16 1, i16 1>
%add = mul <2 x i16> %y38, %y
@@ -9234,6 +10347,15 @@ define <2 x i32> @multi_use_mul_mad_i32_var(i32 %x, i32 %y, i32 %z0, i32 %z1) {
; GFX1200-NEXT: v_add_nc_u32_e32 v0, v1, v2
; GFX1200-NEXT: v_add_nc_u32_e32 v1, v1, v3
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: multi_use_mul_mad_i32_var:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_add_nc_u32 v0, v1, v2 :: v_dual_add_nc_u32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul i32 %x, %y
%add0 = add i32 %mul, %z0
@@ -9394,6 +10516,27 @@ define <2 x i16> @multi_use_mul_mad_i16_var(i16 %x, i16 %y, i16 %z0, i16 %z1) {
; GFX1200-GISEL-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
; GFX1200-GISEL-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-SDAG-LABEL: multi_use_mul_mad_i16_var:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mad_u16 v2, v0, v1, v2
+; GFX1250-SDAG-NEXT: v_mad_u16 v0, v0, v1, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_perm_b32 v0, v0, v2, 0x5040100
+; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: multi_use_mul_mad_i16_var:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_mad_u16 v2, v0, v1, v2
+; GFX1250-GISEL-NEXT: v_mad_u16 v0, v0, v1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX1250-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul i16 %x, %y
%add0 = add i16 %mul, %z0
@@ -9465,6 +10608,17 @@ define i32 @other_use_mul_mad_i32_var(i32 %x, i32 %y, i32 %z, ptr addrspace(3) %
; GFX1200-NEXT: ds_store_b32 v3, v1
; GFX1200-NEXT: s_wait_dscnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: other_use_mul_mad_i32_var:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v1, v2
+; GFX1250-NEXT: ds_store_b32 v3, v1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul i32 %x, %y
%add0 = add i32 %mul, %z
@@ -9600,6 +10754,16 @@ define i16 @other_use_mul_mad_i16_var(i16 %x, i16 %y, i16 %z, ptr addrspace(3) %
; GFX1200-GISEL-FAKE16-NEXT: ds_store_b16 v3, v4
; GFX1200-GISEL-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX1200-GISEL-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: other_use_mul_mad_i16_var:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u16 v4, v0, v1
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v2
+; GFX1250-NEXT: ds_store_b16 v3, v4
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul i16 %x, %y
%add0 = add i16 %mul, %z
@@ -9715,6 +10879,16 @@ define <4 x i16> @multi_use_mul_mad_v2i16_var(<2 x i16> %x, <2 x i16> %y, <2 x i
; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1200-NEXT: v_mov_b32_e32 v0, v2
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: multi_use_mul_mad_v2i16_var:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mad_u16 v2, v0, v1, v2
+; GFX1250-NEXT: v_pk_mad_u16 v1, v0, v1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul <2 x i16> %x, %y
%add0 = add <2 x i16> %mul, %z0
@@ -9842,6 +11016,16 @@ define <2 x i16> @other_use_mul_mad_v2i16_var(<2 x i16> %x, <2 x i16> %y, <2 x i
; GFX1200-NEXT: ds_store_b32 v3, v4
; GFX1200-NEXT: s_wait_dscnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: other_use_mul_mad_v2i16_var:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v4, v0, v1
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, v2
+; GFX1250-NEXT: ds_store_b32 v3, v4
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%mul = mul <2 x i16> %x, %y
%add0 = add <2 x i16> %mul, %z
@@ -9925,6 +11109,13 @@ define i64 @mul_u24_add64(i32 %x, i32 %y, i64 %z) {
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: v_mad_co_u64_u32 v[0:1], null, v0, v1, v[2:3]
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_u24_add64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_co_u64_u32 v[0:1], null, v0, v1, v[2:3]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = call i64 @llvm.amdgcn.mul.u24.i64(i32 %x, i32 %y)
%add = add i64 %mul, %z
ret i64 %add
@@ -9985,6 +11176,16 @@ define i64 @mul_u24_zext_add64(i32 %x, i32 %y, i64 %z) {
; GFX1200-NEXT: s_wait_alu 0xfffd
; GFX1200-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v3, vcc_lo
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_u24_zext_add64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v5, 0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v4, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
%mul.zext = zext i32 %mul to i64
%add = add i64 %mul.zext, %z
diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll b/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll
new file mode 100644
index 0000000..d23509b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll
@@ -0,0 +1,543 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=GFX900
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s --check-prefixes=GFX90A
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -mattr=+tgsplit < %s | FileCheck %s --check-prefixes=GFX90A-TGSPLIT
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck %s --check-prefixes=GFX942
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -mattr=+tgsplit < %s | FileCheck %s --check-prefixes=GFX942-TGSPLIT
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefixes=GFX10WGP
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+cumode < %s | FileCheck %s -check-prefixes=GFX10CU
+
+; In each of these tests, an LDS DMA operation is followed by a release pattern
+; at workgroup scope. The fence in such a release (implicit or explicit) should
+; wait for the store component of the LDS DMA. The additional noalias metadata
+; is only there to ensure that the wait counts are not generated because of
+; unintended aliasing.
+
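As a distilled illustration of the pattern these tests exercise, here is a minimal sketch only (not part of the patch): it reuses the buffer-to-LDS intrinsic declared just below, assumes the resource descriptor and LDS pointer arrive as kernel arguments, and omits the autogenerated checks. On gfx900, for example, the checks above this point show an s_waitcnt vmcnt(0) being emitted for the release before the barrier.

define amdgpu_kernel void @lds_dma_release_sketch(<4 x i32> inreg %rsrc, ptr addrspace(3) inreg %lds) {
main_body:
  ; Asynchronous buffer->LDS copy; its LDS store is tracked by the vmcnt counter.
  call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
  ; The workgroup-scope release must wait for that store before other waves in
  ; the workgroup can pass the barrier and read the LDS contents.
  fence syncscope("workgroup") release
  call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  ret void
}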
+declare void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) nocapture, i32 %size, i32 %voffset, i32 %soffset, i32 %offset, i32 %aux)
+
+define amdgpu_kernel void @barrier_release(<4 x i32> inreg %rsrc,
+; GFX900-LABEL: barrier_release:
+; GFX900: ; %bb.0: ; %main_body
+; GFX900-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; GFX900-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX900-NEXT: v_mov_b32_e32 v1, 0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: s_mov_b32 m0, s12
+; GFX900-NEXT: s_nop 0
+; GFX900-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX900-NEXT: v_mov_b32_e32 v0, s13
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: s_barrier
+; GFX900-NEXT: ds_read_b32 v0, v0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_store_dword v1, v0, s[14:15]
+; GFX900-NEXT: s_endpgm
+;
+; GFX90A-LABEL: barrier_release:
+; GFX90A: ; %bb.1:
+; GFX90A-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_branch .LBB0_0
+; GFX90A-NEXT: .p2align 8
+; GFX90A-NEXT: ; %bb.2:
+; GFX90A-NEXT: .LBB0_0: ; %main_body
+; GFX90A-NEXT: s_mov_b32 m0, s12
+; GFX90A-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX90A-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX90A-NEXT: v_mov_b32_e32 v0, s13
+; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x3c
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: s_barrier
+; GFX90A-NEXT: ds_read_b32 v0, v0
+; GFX90A-NEXT: v_mov_b32_e32 v1, 0
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX90A-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: barrier_release:
+; GFX90A-TGSPLIT: ; %bb.1:
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_branch .LBB0_0
+; GFX90A-TGSPLIT-NEXT: .p2align 8
+; GFX90A-TGSPLIT-NEXT: ; %bb.2:
+; GFX90A-TGSPLIT-NEXT: .LBB0_0: ; %main_body
+; GFX90A-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX90A-TGSPLIT-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v0, s13
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x3c
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_barrier
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: ds_read_b32 v0, v0
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX942-LABEL: barrier_release:
+; GFX942: ; %bb.1:
+; GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_branch .LBB0_0
+; GFX942-NEXT: .p2align 8
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: .LBB0_0: ; %main_body
+; GFX942-NEXT: s_mov_b32 m0, s12
+; GFX942-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX942-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX942-NEXT: v_mov_b32_e32 v0, s13
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x3c
+; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_barrier
+; GFX942-NEXT: ds_read_b32 v0, v0
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX942-NEXT: s_endpgm
+;
+; GFX942-TGSPLIT-LABEL: barrier_release:
+; GFX942-TGSPLIT: ; %bb.1:
+; GFX942-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: s_branch .LBB0_0
+; GFX942-TGSPLIT-NEXT: .p2align 8
+; GFX942-TGSPLIT-NEXT: ; %bb.2:
+; GFX942-TGSPLIT-NEXT: .LBB0_0: ; %main_body
+; GFX942-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX942-TGSPLIT-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v0, s13
+; GFX942-TGSPLIT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x3c
+; GFX942-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: s_barrier
+; GFX942-TGSPLIT-NEXT: buffer_inv sc0
+; GFX942-TGSPLIT-NEXT: ds_read_b32 v0, v0
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX942-TGSPLIT-NEXT: s_endpgm
+;
+; GFX10WGP-LABEL: barrier_release:
+; GFX10WGP: ; %bb.0: ; %main_body
+; GFX10WGP-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; GFX10WGP-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX10WGP-NEXT: v_mov_b32_e32 v1, 0
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: s_mov_b32 m0, s12
+; GFX10WGP-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX10WGP-NEXT: v_mov_b32_e32 v0, s13
+; GFX10WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10WGP-NEXT: s_barrier
+; GFX10WGP-NEXT: buffer_gl0_inv
+; GFX10WGP-NEXT: ds_read_b32 v0, v0
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: global_store_dword v1, v0, s[14:15]
+; GFX10WGP-NEXT: s_endpgm
+;
+; GFX10CU-LABEL: barrier_release:
+; GFX10CU: ; %bb.0: ; %main_body
+; GFX10CU-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; GFX10CU-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX10CU-NEXT: v_mov_b32_e32 v1, 0
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: s_mov_b32 m0, s12
+; GFX10CU-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
+; GFX10CU-NEXT: v_mov_b32_e32 v0, s13
+; GFX10CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10CU-NEXT: s_barrier
+; GFX10CU-NEXT: ds_read_b32 v0, v0
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: global_store_dword v1, v0, s[14:15]
+; GFX10CU-NEXT: s_endpgm
+ ptr addrspace(3) inreg %lds1,
+ ptr addrspace(3) inreg %lds2,
+ ptr addrspace(1) %dummy2) {
+main_body:
+ call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) %lds1, i32 4, i32 2048, i32 0, i32 0, i32 0), !alias.scope !102
+ fence syncscope("workgroup") release
+ tail call void @llvm.amdgcn.s.barrier()
+ fence syncscope("workgroup") acquire
+ %load = load i32, ptr addrspace(3) %lds2, align 4, !noalias !105
+ store i32 %load, ptr addrspace(1) %dummy2, align 4, !noalias !105
+ ret void
+}
+
+define amdgpu_kernel void @fence_fence(<4 x i32> inreg %rsrc,
+; GFX900-LABEL: fence_fence:
+; GFX900: ; %bb.0: ; %main_body
+; GFX900-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX900-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX900-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX900-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX900-NEXT: v_mov_b32_e32 v0, 0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: s_mov_b32 m0, s6
+; GFX900-NEXT: s_nop 0
+; GFX900-NEXT: buffer_load_dword v1, s[0:3], 0 offen lds
+; GFX900-NEXT: v_mov_b32_e32 v1, 1
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX900-NEXT: global_load_dword v1, v0, s[8:9]
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_mov_b32_e32 v1, s7
+; GFX900-NEXT: ds_read_b32 v1, v1
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_store_dword v0, v1, s[10:11]
+; GFX900-NEXT: s_endpgm
+;
+; GFX90A-LABEL: fence_fence:
+; GFX90A: ; %bb.1:
+; GFX90A-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_branch .LBB1_0
+; GFX90A-NEXT: .p2align 8
+; GFX90A-NEXT: ; %bb.2:
+; GFX90A-NEXT: .LBB1_0: ; %main_body
+; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX90A-NEXT: s_mov_b32 m0, s12
+; GFX90A-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX90A-NEXT: v_mov_b32_e32 v1, 1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90A-NEXT: global_load_dword v1, v0, s[0:1]
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v1, s13
+; GFX90A-NEXT: ds_read_b32 v1, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX90A-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: fence_fence:
+; GFX90A-TGSPLIT: ; %bb.1:
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_branch .LBB1_0
+; GFX90A-TGSPLIT-NEXT: .p2align 8
+; GFX90A-TGSPLIT-NEXT: ; %bb.2:
+; GFX90A-TGSPLIT-NEXT: .LBB1_0: ; %main_body
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX90A-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-TGSPLIT-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, 1
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90A-TGSPLIT-NEXT: global_load_dword v1, v0, s[0:1] glc
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, s13
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: ds_read_b32 v1, v1
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX942-LABEL: fence_fence:
+; GFX942: ; %bb.1:
+; GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_branch .LBB1_0
+; GFX942-NEXT: .p2align 8
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: .LBB1_0: ; %main_body
+; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX942-NEXT: s_mov_b32 m0, s12
+; GFX942-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX942-NEXT: v_mov_b32_e32 v1, 1
+; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-NEXT: global_store_dword v0, v1, s[0:1] sc0
+; GFX942-NEXT: global_load_dword v1, v0, s[0:1] sc0
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v1, s13
+; GFX942-NEXT: ds_read_b32 v1, v1
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX942-NEXT: s_endpgm
+;
+; GFX942-TGSPLIT-LABEL: fence_fence:
+; GFX942-TGSPLIT: ; %bb.1:
+; GFX942-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: s_branch .LBB1_0
+; GFX942-TGSPLIT-NEXT: .p2align 8
+; GFX942-TGSPLIT-NEXT: ; %bb.2:
+; GFX942-TGSPLIT-NEXT: .LBB1_0: ; %main_body
+; GFX942-TGSPLIT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX942-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-TGSPLIT-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, 1
+; GFX942-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: global_store_dword v0, v1, s[0:1] sc0
+; GFX942-TGSPLIT-NEXT: global_load_dword v1, v0, s[0:1] sc0
+; GFX942-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, s13
+; GFX942-TGSPLIT-NEXT: buffer_inv sc0
+; GFX942-TGSPLIT-NEXT: ds_read_b32 v1, v1
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX942-TGSPLIT-NEXT: s_endpgm
+;
+; GFX10WGP-LABEL: fence_fence:
+; GFX10WGP: ; %bb.0: ; %main_body
+; GFX10WGP-NEXT: s_clause 0x2
+; GFX10WGP-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX10WGP-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10WGP-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX10WGP-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX10WGP-NEXT: v_mov_b32_e32 v1, 0
+; GFX10WGP-NEXT: v_mov_b32_e32 v2, 1
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: s_mov_b32 m0, s6
+; GFX10WGP-NEXT: buffer_load_dword v0, s[0:3], 0 offen lds
+; GFX10WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10WGP-NEXT: global_store_dword v1, v2, s[8:9]
+; GFX10WGP-NEXT: global_load_dword v0, v1, s[8:9] glc
+; GFX10WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10WGP-NEXT: v_mov_b32_e32 v0, s7
+; GFX10WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10WGP-NEXT: buffer_gl0_inv
+; GFX10WGP-NEXT: ds_read_b32 v0, v0
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: global_store_dword v1, v0, s[10:11]
+; GFX10WGP-NEXT: s_endpgm
+;
+; GFX10CU-LABEL: fence_fence:
+; GFX10CU: ; %bb.0: ; %main_body
+; GFX10CU-NEXT: s_clause 0x2
+; GFX10CU-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX10CU-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10CU-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX10CU-NEXT: v_mov_b32_e32 v0, 0x800
+; GFX10CU-NEXT: v_mov_b32_e32 v1, 0
+; GFX10CU-NEXT: v_mov_b32_e32 v2, 1
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: s_mov_b32 m0, s6
+; GFX10CU-NEXT: buffer_load_dword v0, s[0:3], 0 offen lds
+; GFX10CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10CU-NEXT: global_store_dword v1, v2, s[8:9]
+; GFX10CU-NEXT: global_load_dword v0, v1, s[8:9]
+; GFX10CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10CU-NEXT: v_mov_b32_e32 v0, s7
+; GFX10CU-NEXT: ds_read_b32 v0, v0
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: global_store_dword v1, v0, s[10:11]
+; GFX10CU-NEXT: s_endpgm
+ ptr addrspace(3) inreg %lds1,
+ ptr addrspace(3) inreg %lds2,
+ ptr addrspace(1) %flag,
+ ptr addrspace(1) %dummy2) {
+main_body:
+ call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) %lds1, i32 4, i32 2048, i32 0, i32 0, i32 0), !alias.scope !102
+ fence syncscope("workgroup") release
+ store atomic i32 1, ptr addrspace(1) %flag syncscope("workgroup") monotonic, align 4, !noalias !105
+ %unused_flag = load atomic i32, ptr addrspace(1) %flag syncscope("workgroup") monotonic, align 4, !noalias !105
+ fence syncscope("workgroup") acquire
+ %load = load i32, ptr addrspace(3) %lds2, align 4, !noalias !105
+ store i32 %load, ptr addrspace(1) %dummy2, align 4, !noalias !105
+ ret void
+}
+
+define amdgpu_kernel void @release_acquire(<4 x i32> inreg %rsrc,
+; GFX900-LABEL: release_acquire:
+; GFX900: ; %bb.0: ; %main_body
+; GFX900-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX900-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX900-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX900-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX900-NEXT: v_mov_b32_e32 v0, 0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: s_mov_b32 m0, s6
+; GFX900-NEXT: s_nop 0
+; GFX900-NEXT: buffer_load_dword v1, s[0:3], 0 offen lds
+; GFX900-NEXT: v_mov_b32_e32 v1, 1
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX900-NEXT: global_load_dword v1, v0, s[8:9]
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_mov_b32_e32 v1, s7
+; GFX900-NEXT: ds_read_b32 v1, v1
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_store_dword v0, v1, s[10:11]
+; GFX900-NEXT: s_endpgm
+;
+; GFX90A-LABEL: release_acquire:
+; GFX90A: ; %bb.1:
+; GFX90A-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: s_branch .LBB2_0
+; GFX90A-NEXT: .p2align 8
+; GFX90A-NEXT: ; %bb.2:
+; GFX90A-NEXT: .LBB2_0: ; %main_body
+; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX90A-NEXT: s_mov_b32 m0, s12
+; GFX90A-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX90A-NEXT: v_mov_b32_e32 v1, 1
+; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90A-NEXT: global_load_dword v1, v0, s[0:1]
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v1, s13
+; GFX90A-NEXT: ds_read_b32 v1, v1
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX90A-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: release_acquire:
+; GFX90A-TGSPLIT: ; %bb.1:
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_branch .LBB2_0
+; GFX90A-TGSPLIT-NEXT: .p2align 8
+; GFX90A-TGSPLIT-NEXT: ; %bb.2:
+; GFX90A-TGSPLIT-NEXT: .LBB2_0: ; %main_body
+; GFX90A-TGSPLIT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX90A-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-TGSPLIT-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, 1
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90A-TGSPLIT-NEXT: global_load_dword v1, v0, s[0:1] glc
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: v_mov_b32_e32 v1, s13
+; GFX90A-TGSPLIT-NEXT: ds_read_b32 v1, v1
+; GFX90A-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-TGSPLIT-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX942-LABEL: release_acquire:
+; GFX942: ; %bb.1:
+; GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_branch .LBB2_0
+; GFX942-NEXT: .p2align 8
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: .LBB2_0: ; %main_body
+; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX942-NEXT: s_mov_b32 m0, s12
+; GFX942-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX942-NEXT: v_mov_b32_e32 v1, 1
+; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-NEXT: global_store_dword v0, v1, s[0:1] sc0
+; GFX942-NEXT: global_load_dword v1, v0, s[0:1] sc0
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v1, s13
+; GFX942-NEXT: ds_read_b32 v1, v1
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX942-NEXT: s_endpgm
+;
+; GFX942-TGSPLIT-LABEL: release_acquire:
+; GFX942-TGSPLIT: ; %bb.1:
+; GFX942-TGSPLIT-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX942-TGSPLIT-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x10
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: s_branch .LBB2_0
+; GFX942-TGSPLIT-NEXT: .p2align 8
+; GFX942-TGSPLIT-NEXT: ; %bb.2:
+; GFX942-TGSPLIT-NEXT: .LBB2_0: ; %main_body
+; GFX942-TGSPLIT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x3c
+; GFX942-TGSPLIT-NEXT: s_mov_b32 m0, s12
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-TGSPLIT-NEXT: buffer_load_dword v1, s[8:11], 0 offen lds
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, 1
+; GFX942-TGSPLIT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: global_store_dword v0, v1, s[0:1] sc0
+; GFX942-TGSPLIT-NEXT: global_load_dword v1, v0, s[0:1] sc0
+; GFX942-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX942-TGSPLIT-NEXT: buffer_inv sc0
+; GFX942-TGSPLIT-NEXT: v_mov_b32_e32 v1, s13
+; GFX942-TGSPLIT-NEXT: ds_read_b32 v1, v1
+; GFX942-TGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-TGSPLIT-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX942-TGSPLIT-NEXT: s_endpgm
+;
+; GFX10WGP-LABEL: release_acquire:
+; GFX10WGP: ; %bb.0: ; %main_body
+; GFX10WGP-NEXT: s_clause 0x2
+; GFX10WGP-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX10WGP-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10WGP-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX10WGP-NEXT: v_mov_b32_e32 v0, 0
+; GFX10WGP-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX10WGP-NEXT: v_mov_b32_e32 v2, 1
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: s_mov_b32 m0, s6
+; GFX10WGP-NEXT: buffer_load_dword v1, s[0:3], 0 offen lds
+; GFX10WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10WGP-NEXT: global_store_dword v0, v2, s[8:9]
+; GFX10WGP-NEXT: global_load_dword v1, v0, s[8:9] glc
+; GFX10WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10WGP-NEXT: buffer_gl0_inv
+; GFX10WGP-NEXT: v_mov_b32_e32 v1, s7
+; GFX10WGP-NEXT: ds_read_b32 v1, v1
+; GFX10WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10WGP-NEXT: global_store_dword v0, v1, s[10:11]
+; GFX10WGP-NEXT: s_endpgm
+;
+; GFX10CU-LABEL: release_acquire:
+; GFX10CU: ; %bb.0: ; %main_body
+; GFX10CU-NEXT: s_clause 0x2
+; GFX10CU-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX10CU-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10CU-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x3c
+; GFX10CU-NEXT: v_mov_b32_e32 v0, 0
+; GFX10CU-NEXT: v_mov_b32_e32 v1, 0x800
+; GFX10CU-NEXT: v_mov_b32_e32 v2, 1
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: s_mov_b32 m0, s6
+; GFX10CU-NEXT: buffer_load_dword v1, s[0:3], 0 offen lds
+; GFX10CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10CU-NEXT: global_store_dword v0, v2, s[8:9]
+; GFX10CU-NEXT: global_load_dword v1, v0, s[8:9]
+; GFX10CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10CU-NEXT: ds_read_b32 v1, v1
+; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10CU-NEXT: global_store_dword v0, v1, s[10:11]
+; GFX10CU-NEXT: s_endpgm
+ ptr addrspace(3) inreg %lds1,
+ ptr addrspace(3) inreg %lds2,
+ ptr addrspace(1) %flag,
+ ptr addrspace(1) %dummy2) {
+main_body:
+ call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) %lds1, i32 4, i32 2048, i32 0, i32 0, i32 0), !alias.scope !102
+ store atomic i32 1, ptr addrspace(1) %flag syncscope("workgroup") release, align 4, !noalias !105
+ %unused_flag = load atomic i32, ptr addrspace(1) %flag syncscope("workgroup") acquire, align 4, !noalias !105
+ %load = load i32, ptr addrspace(3) %lds2, align 4, !noalias !105
+ store i32 %load, ptr addrspace(1) %dummy2, align 4, !noalias !105
+ ret void
+}
+
+!100 = !{!100}
+!101 = !{!101, !100}
+!102 = !{!101}
+!103 = !{!103, !100}
+!104 = !{!103}
+!105 = !{!101, !103}
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index 4f81d35..ceed41f 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -10,9 +10,9 @@
; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,verify,loop-mssa(loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,verify,loop-mssa(loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
define void @empty() {
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.bitop3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.bitop3.ll
index ea8513f..c985e76 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.bitop3.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.bitop3.ll
@@ -1,6 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950-SDAG %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950,GFX950-SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX950,GFX950-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-SDAG,GFX1250-TRUE16,GFX1250-SDG-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-SDAG,GFX1250-FAKE16,GFX1250-SDG-FAKE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-GISEL,GFX1250-TRUE16,GFX1250-GISEL-TRUE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX1250,GFX1250-GISEL,GFX1250-FAKE16,GFX1250-GISEL-FAKE16 %s
declare i32 @llvm.amdgcn.bitop3.i32(i32, i32, i32, i32)
declare i16 @llvm.amdgcn.bitop3.i16(i16, i16, i16, i32)
@@ -26,23 +30,35 @@ define amdgpu_ps float @bitop3_b32_svv(i32 inreg %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @bitop3_b32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
-; GCN-LABEL: bitop3_b32_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_mov_b32_e32 v1, s1
-; GCN-NEXT: v_bitop3_b32 v0, s0, v1, v0 bitop3:0x11
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b32_ssv:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v1, s1
+; GFX950-NEXT: v_bitop3_b32 v0, s0, v1, v0 bitop3:0x11
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: bitop3_b32_ssv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_bitop3_b32 v0, s0, s1, v0 bitop3:0x11
+; GFX1250-NEXT: ; return to shader part epilog
%ret = call i32 @llvm.amdgcn.bitop3.i32(i32 %a, i32 %b, i32 %c, i32 17)
%ret_cast = bitcast i32 %ret to float
ret float %ret_cast
}
define amdgpu_ps float @bitop3_b32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
-; GCN-LABEL: bitop3_b32_sss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_mov_b32_e32 v0, s1
-; GCN-NEXT: v_mov_b32_e32 v1, s2
-; GCN-NEXT: v_bitop3_b32 v0, s0, v0, v1 bitop3:0x12
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b32_sss:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v0, s1
+; GFX950-NEXT: v_mov_b32_e32 v1, s2
+; GFX950-NEXT: v_bitop3_b32 v0, s0, v0, v1 bitop3:0x12
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: bitop3_b32_sss:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_bitop3_b32 v0, s0, s1, v0 bitop3:0x12
+; GFX1250-NEXT: ; return to shader part epilog
%ret = call i32 @llvm.amdgcn.bitop3.i32(i32 %a, i32 %b, i32 %c, i32 18)
%ret_cast = bitcast i32 %ret to float
ret float %ret_cast
@@ -60,6 +76,11 @@ define amdgpu_ps float @bitop3_b32_vvi(i32 %a, i32 %b) {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x13
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: bitop3_b32_vvi:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_bitop3_b32 v0, v0, v1, 0x3e8 bitop3:0x13
+; GFX1250-NEXT: ; return to shader part epilog
%ret = call i32 @llvm.amdgcn.bitop3.i32(i32 %a, i32 %b, i32 1000, i32 19)
%ret_cast = bitcast i32 %ret to float
ret float %ret_cast
@@ -79,6 +100,20 @@ define amdgpu_ps float @bitop3_b32_vii(i32 %a) {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x14
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: bitop3_b32_vii:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_movk_i32 s0, 0x7d0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, v0, s0, 0x3e8 bitop3:0x14
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: bitop3_b32_vii:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e8
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, v0, 0x7d0, v1 bitop3:0x14
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%ret = call i32 @llvm.amdgcn.bitop3.i32(i32 %a, i32 2000, i32 1000, i32 20)
%ret_cast = bitcast i32 %ret to float
ret float %ret_cast
@@ -102,49 +137,109 @@ define amdgpu_ps float @bitop3_b32_iii() {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b32 v0, v0, v1, v2 bitop3:0x15
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDAG-LABEL: bitop3_b32_iii:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0x3e8
+; GFX1250-SDAG-NEXT: s_movk_i32 s0, 0xbb8
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_bitop3_b32 v0, s0, 0x7d0, v0 bitop3:0x15
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: bitop3_b32_iii:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0x7d0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e8
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_bitop3_b32 v0, 0xbb8, v0, v1 bitop3:0x15
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%ret = call i32 @llvm.amdgcn.bitop3.i32(i32 3000, i32 2000, i32 1000, i32 21)
%ret_cast = bitcast i32 %ret to float
ret float %ret_cast
}
define amdgpu_ps half @bitop3_b16_vvv(i16 %a, i16 %b, i16 %c) {
-; GCN-LABEL: bitop3_b16_vvv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0xf
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b16_vvv:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0xf
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-TRUE16-LABEL: bitop3_b16_vvv:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, v2.l bitop3:0xf
+; GFX1250-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-FAKE16-LABEL: bitop3_b16_vvv:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0xf
+; GFX1250-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 %b, i16 %c, i32 15)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
}
define amdgpu_ps half @bitop3_b16_svv(i16 inreg %a, i16 %b, i16 %c) {
-; GCN-LABEL: bitop3_b16_svv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_bitop3_b16 v0, s0, v0, v1 bitop3:0x10
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b16_svv:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_bitop3_b16 v0, s0, v0, v1 bitop3:0x10
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-TRUE16-LABEL: bitop3_b16_svv:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, s0, v0.l, v1.l bitop3:0x10
+; GFX1250-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-FAKE16-LABEL: bitop3_b16_svv:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_bitop3_b16 v0, s0, v0, v1 bitop3:0x10
+; GFX1250-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 %b, i16 %c, i32 16)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
}
define amdgpu_ps half @bitop3_b16_ssv(i16 inreg %a, i16 inreg %b, i16 %c) {
-; GCN-LABEL: bitop3_b16_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_mov_b32_e32 v1, s1
-; GCN-NEXT: v_bitop3_b16 v0, s0, v1, v0 bitop3:0x11
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b16_ssv:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v1, s1
+; GFX950-NEXT: v_bitop3_b16 v0, s0, v1, v0 bitop3:0x11
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-TRUE16-LABEL: bitop3_b16_ssv:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, s0, s1, v0.l bitop3:0x11
+; GFX1250-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-FAKE16-LABEL: bitop3_b16_ssv:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_bitop3_b16 v0, s0, s1, v0 bitop3:0x11
+; GFX1250-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 %b, i16 %c, i32 17)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
}
define amdgpu_ps half @bitop3_b16_sss(i16 inreg %a, i16 inreg %b, i16 inreg %c) {
-; GCN-LABEL: bitop3_b16_sss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_mov_b32_e32 v0, s1
-; GCN-NEXT: v_mov_b32_e32 v1, s2
-; GCN-NEXT: v_bitop3_b16 v0, s0, v0, v1 bitop3:0x12
-; GCN-NEXT: ; return to shader part epilog
+; GFX950-LABEL: bitop3_b16_sss:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: v_mov_b32_e32 v0, s1
+; GFX950-NEXT: v_mov_b32_e32 v1, s2
+; GFX950-NEXT: v_bitop3_b16 v0, s0, v0, v1 bitop3:0x12
+; GFX950-NEXT: ; return to shader part epilog
+;
+; GFX1250-TRUE16-LABEL: bitop3_b16_sss:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v0.l, s2
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, s0, s1, v0.l bitop3:0x12
+; GFX1250-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-FAKE16-LABEL: bitop3_b16_sss:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_bitop3_b16 v0, s0, s1, v0 bitop3:0x12
+; GFX1250-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 %b, i16 %c, i32 18)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
@@ -162,6 +257,16 @@ define amdgpu_ps half @bitop3_b16_vvi(i16 %a, i16 %b) {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0x13
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-TRUE16-LABEL: bitop3_b16_vvi:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, 0x3e8 bitop3:0x13
+; GFX1250-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-FAKE16-LABEL: bitop3_b16_vvi:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_bitop3_b16 v0, v0, v1, 0x3e8 bitop3:0x13
+; GFX1250-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 %b, i16 1000, i32 19)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
@@ -181,6 +286,34 @@ define amdgpu_ps half @bitop3_b16_vii(i16 %a) {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0x14
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDG-TRUE16-LABEL: bitop3_b16_vii:
+; GFX1250-SDG-TRUE16: ; %bb.0:
+; GFX1250-SDG-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0x7d0
+; GFX1250-SDG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v1.l, 0x3e8 bitop3:0x14
+; GFX1250-SDG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDG-FAKE16-LABEL: bitop3_b16_vii:
+; GFX1250-SDG-FAKE16: ; %bb.0:
+; GFX1250-SDG-FAKE16-NEXT: s_movk_i32 s0, 0x7d0
+; GFX1250-SDG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDG-FAKE16-NEXT: v_bitop3_b16 v0, v0, s0, 0x3e8 bitop3:0x14
+; GFX1250-SDG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-TRUE16-LABEL: bitop3_b16_vii:
+; GFX1250-GISEL-TRUE16: ; %bb.0:
+; GFX1250-GISEL-TRUE16-NEXT: v_mov_b16_e32 v0.h, 0x3e8
+; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, 0x7d0, v0.h bitop3:0x14
+; GFX1250-GISEL-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-FAKE16-LABEL: bitop3_b16_vii:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0x3e8
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_bitop3_b16 v0, v0, 0x7d0, v1 bitop3:0x14
+; GFX1250-GISEL-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 %a, i16 2000, i16 1000, i32 20)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
@@ -203,6 +336,38 @@ define amdgpu_ps half @bitop3_b16_iii() {
; GFX950-GISEL-NEXT: v_mov_b32_e32 v2, 0x3e8
; GFX950-GISEL-NEXT: v_bitop3_b16 v0, v0, v1, v2 bitop3:0x15
; GFX950-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDG-TRUE16-LABEL: bitop3_b16_iii:
+; GFX1250-SDG-TRUE16: ; %bb.0:
+; GFX1250-SDG-TRUE16-NEXT: v_mov_b16_e32 v0.l, 0x7d0
+; GFX1250-SDG-TRUE16-NEXT: v_mov_b16_e32 v0.h, 0xbb8
+; GFX1250-SDG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDG-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.h, v0.l, 0x3e8 bitop3:0x15
+; GFX1250-SDG-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-SDG-FAKE16-LABEL: bitop3_b16_iii:
+; GFX1250-SDG-FAKE16: ; %bb.0:
+; GFX1250-SDG-FAKE16-NEXT: v_mov_b32_e32 v0, 0x3e8
+; GFX1250-SDG-FAKE16-NEXT: s_movk_i32 s0, 0xbb8
+; GFX1250-SDG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDG-FAKE16-NEXT: v_bitop3_b16 v0, s0, 0x7d0, v0 bitop3:0x15
+; GFX1250-SDG-FAKE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-TRUE16-LABEL: bitop3_b16_iii:
+; GFX1250-GISEL-TRUE16: ; %bb.0:
+; GFX1250-GISEL-TRUE16-NEXT: v_mov_b16_e32 v0.l, 0x7d0
+; GFX1250-GISEL-TRUE16-NEXT: v_mov_b16_e32 v0.h, 0x3e8
+; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-TRUE16-NEXT: v_bitop3_b16 v0.l, 0xbb8, v0.l, v0.h bitop3:0x15
+; GFX1250-GISEL-TRUE16-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-FAKE16-LABEL: bitop3_b16_iii:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, 0x7d0
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0x3e8
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_bitop3_b16 v0, 0xbb8, v0, v1 bitop3:0x15
+; GFX1250-GISEL-FAKE16-NEXT: ; return to shader part epilog
%ret = call i16 @llvm.amdgcn.bitop3.i16(i16 3000, i16 2000, i16 1000, i32 21)
%ret_cast = bitcast i16 %ret to half
ret half %ret_cast
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
index 43c8d83..fd51759 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
@@ -1,10 +1,188 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+declare i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float, float, i32, i1)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float, i32, i32, i32)
declare float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32, i32)
+define i32 @test_cvt_pk_fp8_f32_word0(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.l, v0, v1 clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 false)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_pk_fp8_f32_word1(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 true)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_pk_fp8_f32_word1_dpp(i32 %a, float %y, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %tmp1, float %y, i32 %old, i1 true)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte0(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 0)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte1(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 1)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte2(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:2 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 2)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte3(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:3 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 3)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_sr_fp8_f32_byte1_dpp(i32 %a, i32 %r, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %tmp1, i32 %r, i32 %old, i32 1)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
define float @test_cvt_f32_fp8_e5m3_byte0(i32 %a) {
; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte0:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
new file mode 100644
index 0000000..6ccfad7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
@@ -0,0 +1,539 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-REAL16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-REAL16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-FAKE16 %s
+
+declare i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half>)
+declare i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half>)
+declare i32 @llvm.amdgcn.cvt.sr.bf8.f16(half, i32, i32, i32)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f16(half, i32, i32, i32)
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
new file mode 100644
index 0000000..2179800
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+declare <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
new file mode 100644
index 0000000..4309cfbe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_bf8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_bf8 v[4:7], v[0:1], v2 scale_sel:2
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 2)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_f16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_f16_fp4 v[4:7], v0, v1 scale_sel:3
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 3)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp4 v[4:7], v0, v1 scale_sel:4
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 4)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 7)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 1)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
index 291a4e2..217c306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
@@ -168,7 +168,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -179,7 +179,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -213,7 +213,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -225,7 +225,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -259,7 +259,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -269,7 +269,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -300,7 +300,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -311,7 +311,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -345,7 +345,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -357,7 +357,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -391,7 +391,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -401,7 +401,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -773,7 +773,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -783,7 +783,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -895,7 +895,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -905,7 +905,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -935,7 +935,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -945,7 +945,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
@@ -1602,7 +1602,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1613,7 +1613,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1647,7 +1647,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1659,7 +1659,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1693,7 +1693,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1703,7 +1703,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -1734,7 +1734,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1745,7 +1745,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1779,7 +1779,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1791,7 +1791,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1825,7 +1825,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1835,7 +1835,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -2032,7 +2032,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -2042,7 +2042,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -2112,7 +2112,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -2122,7 +2122,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -2152,7 +2152,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -2162,7 +2162,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
new file mode 100644
index 0000000..82991ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; xUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: GlobalISel does not work with bf16
+
+declare <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.async.to.lds.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.async.to.lds.ll
new file mode 100644
index 0000000..dd67910
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.async.to.lds.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare void @llvm.amdgcn.global.load.async.to.lds.b8(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.load.async.to.lds.b64(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.load.async.to.lds.b128(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+
+define amdgpu_ps void @global_load_async_to_lds_b8_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_load_async_to_lds_b8_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_load_async_to_lds_b8 v2, v[0:1], off offset:16 th:TH_LOAD_NT
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_load_async_to_lds_b8_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_load_async_to_lds_b8 v2, v[0:1], off offset:16 th:TH_LOAD_NT
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b8(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b8_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_load_async_to_lds_b8_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_load_async_to_lds_b8 v0, v1, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b8(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b32_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_load_async_to_lds_b32_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_load_async_to_lds_b32 v2, v[0:1], off offset:16 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_load_async_to_lds_b32_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_load_async_to_lds_b32 v2, v[0:1], off offset:16 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 10)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b32_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_load_async_to_lds_b32_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_load_async_to_lds_b32 v0, v1, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b64_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_load_async_to_lds_b64_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_load_async_to_lds_b64 v2, v[0:1], off offset:16 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_load_async_to_lds_b64_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_load_async_to_lds_b64 v2, v[0:1], off offset:16 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 22)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b64_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_load_async_to_lds_b64_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_load_async_to_lds_b64 v0, v1, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b128_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_load_async_to_lds_b128_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_load_async_to_lds_b128 v2, v[0:1], off offset:16 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_load_async_to_lds_b128_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_load_async_to_lds_b128 v2, v[0:1], off offset:16 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b128(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 27)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b128_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_load_async_to_lds_b128_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_load_async_to_lds_b128 v0, v1, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.load.async.to.lds.b128(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b32_saddr_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-LABEL: global_load_async_to_lds_b32_saddr_scale_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: global_load_async_to_lds_b32 v0, v1, s[0:1] offset:16 scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i32, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b64_saddr_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-LABEL: global_load_async_to_lds_b64_saddr_scale_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: global_load_async_to_lds_b64 v0, v1, s[0:1] offset:16 scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.load.async.to.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_load_async_to_lds_b64_saddr_no_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-SDAG-LABEL: global_load_async_to_lds_b64_saddr_no_scale_offset:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 2, s[0:1]
+; GFX1250-SDAG-NEXT: global_load_async_to_lds_b64 v0, v[2:3], off offset:16 th:TH_LOAD_NT
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_load_async_to_lds_b64_saddr_no_scale_offset:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX1250-GISEL-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v4, v2
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, v5, v3, vcc_lo
+; GFX1250-GISEL-NEXT: global_load_async_to_lds_b64 v0, v[2:3], off offset:16 th:TH_LOAD_NT
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i32, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.load.async.to.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.store.async.from.lds.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.store.async.from.lds.ll
new file mode 100644
index 0000000..fd35313
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.store.async.from.lds.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare void @llvm.amdgcn.global.store.async.from.lds.b8(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.store.async.from.lds.b32(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.store.async.from.lds.b64(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+declare void @llvm.amdgcn.global.store.async.from.lds.b128(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr, i32 %offset, i32 %cpol)
+
+define amdgpu_ps void @global_store_async_from_lds_b8_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_store_async_from_lds_b8_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_store_async_from_lds_b8 v[0:1], v2, off offset:16 th:TH_STORE_NT
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_store_async_from_lds_b8_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_store_async_from_lds_b8 v[0:1], v2, off offset:16 th:TH_STORE_NT
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b8(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b8_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_store_async_from_lds_b8_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_store_async_from_lds_b8 v1, v0, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b8(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b32(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_store_async_from_lds_b32:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_store_async_from_lds_b32 v[0:1], v2, off offset:16 th:TH_STORE_HT scope:SCOPE_SE
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_store_async_from_lds_b32:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_store_async_from_lds_b32 v[0:1], v2, off offset:16 th:TH_STORE_HT scope:SCOPE_SE
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 10)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b32_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_store_async_from_lds_b32_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_store_async_from_lds_b32 v1, v0, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b64_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_store_async_from_lds_b64_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_store_async_from_lds_b64 v[0:1], v2, off offset:16 th:TH_STORE_NT_HT scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_store_async_from_lds_b64_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_store_async_from_lds_b64 v[0:1], v2, off offset:16 th:TH_STORE_NT_HT scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 22)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b64_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_store_async_from_lds_b64_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_store_async_from_lds_b64 v1, v0, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b128_vaddr(ptr addrspace(1) %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-SDAG-LABEL: global_store_async_from_lds_b128_vaddr:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], 32, v[0:1]
+; GFX1250-SDAG-NEXT: global_store_async_from_lds_b128 v[0:1], v2, off offset:16 th:TH_STORE_BYPASS scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_store_async_from_lds_b128_vaddr:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 32
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: global_store_async_from_lds_b128 v[0:1], v2, off offset:16 th:TH_STORE_BYPASS scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b128(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 27)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b128_saddr(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr) {
+; GFX1250-LABEL: global_store_async_from_lds_b128_saddr:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v1, 32
+; GFX1250-NEXT: global_store_async_from_lds_b128 v1, v0, s[0:1] offset:16
+; GFX1250-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i32 4
+ call void @llvm.amdgcn.global.store.async.from.lds.b128(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 0)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b32_saddr_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-LABEL: global_store_async_from_lds_b32_saddr_scale_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: global_store_async_from_lds_b32 v1, v0, s[0:1] offset:16 scale_offset th:TH_STORE_NT
+; GFX1250-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i32, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.store.async.from.lds.b32(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b64_saddr_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-LABEL: global_store_async_from_lds_b64_saddr_scale_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: global_store_async_from_lds_b64 v1, v0, s[0:1] offset:16 scale_offset th:TH_STORE_NT
+; GFX1250-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i64, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.store.async.from.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
+
+define amdgpu_ps void @global_store_async_from_lds_b64_saddr_no_scale_offset(ptr addrspace(1) inreg %gaddr, ptr addrspace(3) %laddr, i32 %idx) {
+; GFX1250-SDAG-LABEL: global_store_async_from_lds_b64_saddr_no_scale_offset:
+; GFX1250-SDAG: ; %bb.0: ; %entry
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 2, s[0:1]
+; GFX1250-SDAG-NEXT: global_store_async_from_lds_b64 v[2:3], v0, off offset:16 th:TH_STORE_NT
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: global_store_async_from_lds_b64_saddr_no_scale_offset:
+; GFX1250-GISEL: ; %bb.0: ; %entry
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX1250-GISEL-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v4, v2
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, v5, v3, vcc_lo
+; GFX1250-GISEL-NEXT: global_store_async_from_lds_b64 v[2:3], v0, off offset:16 th:TH_STORE_NT
+; GFX1250-GISEL-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %gep = getelementptr i32, ptr addrspace(1) %gaddr, i64 %idxprom
+ call void @llvm.amdgcn.global.store.async.from.lds.b64(ptr addrspace(1) %gep, ptr addrspace(3) %laddr, i32 16, i32 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
index fc0f4eb..7959cee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
@@ -15,8 +15,8 @@ define amdgpu_kernel void @test_iglp_opt_mfma_gemm(ptr addrspace(3) noalias %in,
; GCN-LABEL: test_iglp_opt_mfma_gemm:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: v_mov_b32_e32 v3, 2.0
; GCN-NEXT: ; iglp_opt mask(0x00000000)
; GCN-NEXT: s_waitcnt lgkmcnt(0)
@@ -153,8 +153,8 @@ define amdgpu_kernel void @test_iglp_opt_rev_mfma_gemm(ptr addrspace(3) noalias
; GCN-LABEL: test_iglp_opt_rev_mfma_gemm:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-NEXT: v_mov_b32_e32 v3, 2.0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
@@ -289,8 +289,8 @@ define amdgpu_kernel void @test_iglp_opt_asm_sideeffect(ptr addrspace(3) noalias
; GCN-LABEL: test_iglp_opt_asm_sideeffect:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GCN-NEXT: ; iglp_opt mask(0x00000000)
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.xf32.gfx942.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.xf32.gfx942.ll
index 31a48de..198cac5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.xf32.gfx942.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.xf32.gfx942.ll
@@ -56,6 +56,53 @@ bb:
ret void
}
+define amdgpu_kernel void @test_mfma_f32_16x16x8xf32_vgprcd(ptr addrspace(1) %arg) #1 {
+; GFX942-SDAG-LABEL: test_mfma_f32_16x16x8xf32_vgprcd:
+; GFX942-SDAG: ; %bb.0: ; %bb
+; GFX942-SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v5, 2.0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v6, 0x40400000
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v7, 4.0
+; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-SDAG-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v8, 0
+; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX942-SDAG-NEXT: s_nop 1
+; GFX942-SDAG-NEXT: v_mfma_f32_16x16x8_xf32 v[0:3], v[4:5], v[6:7], v[0:3] cbsz:1 abid:2 blgp:3
+; GFX942-SDAG-NEXT: s_nop 6
+; GFX942-SDAG-NEXT: global_store_dwordx4 v8, v[0:3], s[6:7]
+; GFX942-SDAG-NEXT: s_endpgm
+;
+; GFX942-GISEL-LABEL: test_mfma_f32_16x16x8xf32_vgprcd:
+; GFX942-GISEL: ; %bb.0: ; %bb
+; GFX942-GISEL-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX942-GISEL-NEXT: s_mov_b32 s4, 1.0
+; GFX942-GISEL-NEXT: s_mov_b32 s5, 2.0
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
+; GFX942-GISEL-NEXT: s_mov_b32 s4, 0x40400000
+; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-GISEL-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
+; GFX942-GISEL-NEXT: s_mov_b32 s5, 4.0
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5]
+; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX942-GISEL-NEXT: s_nop 1
+; GFX942-GISEL-NEXT: v_mfma_f32_16x16x8_xf32 v[0:3], v[4:5], v[6:7], v[0:3] cbsz:1 abid:2 blgp:3
+; GFX942-GISEL-NEXT: v_mov_b32_e32 v4, 0
+; GFX942-GISEL-NEXT: s_nop 5
+; GFX942-GISEL-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX942-GISEL-NEXT: s_endpgm
+bb:
+ %in.1 = load <4 x float>, ptr addrspace(1) %arg
+ %mai.1 = tail call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8.xf32(<2 x float> <float 1.0, float 2.0>, <2 x float> <float 3.0, float 4.0>, <4 x float> %in.1, i32 1, i32 2, i32 3)
+ store <4 x float> %mai.1, ptr addrspace(1) %arg
+ ret void
+}
+
define amdgpu_kernel void @test_mfma_f32_32x32x4xf32(ptr addrspace(1) %arg) #0 {
; GFX942-SDAG-LABEL: test_mfma_f32_32x32x4xf32:
; GFX942-SDAG: ; %bb.0: ; %bb
@@ -139,6 +186,75 @@ bb:
ret void
}
+define amdgpu_kernel void @test_mfma_f32_32x32x4xf32_vgprcd(ptr addrspace(1) %arg) #1 {
+; GFX942-SDAG-LABEL: test_mfma_f32_32x32x4xf32_vgprcd:
+; GFX942-SDAG: ; %bb.0: ; %bb
+; GFX942-SDAG-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x24
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, 1.0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v17, 2.0
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v18, 0x40400000
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v19, 4.0
+; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-SDAG-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
+; GFX942-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[6:7]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GFX942-SDAG-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
+; GFX942-SDAG-NEXT: s_nop 1
+; GFX942-SDAG-NEXT: v_mfma_f32_32x32x4_xf32 v[0:15], v[16:17], v[18:19], v[0:15] cbsz:1 abid:2 blgp:3
+; GFX942-SDAG-NEXT: v_mov_b32_e32 v16, 0
+; GFX942-SDAG-NEXT: s_nop 7
+; GFX942-SDAG-NEXT: s_nop 1
+; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
+; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[8:11], s[16:17] offset:32
+; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[4:7], s[16:17] offset:16
+; GFX942-SDAG-NEXT: global_store_dwordx4 v16, v[0:3], s[16:17]
+; GFX942-SDAG-NEXT: s_endpgm
+;
+; GFX942-GISEL-LABEL: test_mfma_f32_32x32x4xf32_vgprcd:
+; GFX942-GISEL: ; %bb.0: ; %bb
+; GFX942-GISEL-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x24
+; GFX942-GISEL-NEXT: s_mov_b32 s18, 1.0
+; GFX942-GISEL-NEXT: s_mov_b32 s19, 2.0
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[18:19]
+; GFX942-GISEL-NEXT: s_mov_b32 s18, 0x40400000
+; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-GISEL-NEXT: s_load_dwordx16 s[0:15], s[16:17], 0x0
+; GFX942-GISEL-NEXT: s_mov_b32 s19, 4.0
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19]
+; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[6:7]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13]
+; GFX942-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15]
+; GFX942-GISEL-NEXT: s_nop 1
+; GFX942-GISEL-NEXT: v_mfma_f32_32x32x4_xf32 v[0:15], v[16:17], v[18:19], v[0:15] cbsz:1 abid:2 blgp:3
+; GFX942-GISEL-NEXT: v_mov_b32_e32 v16, 0
+; GFX942-GISEL-NEXT: s_nop 7
+; GFX942-GISEL-NEXT: s_nop 1
+; GFX942-GISEL-NEXT: global_store_dwordx4 v16, v[0:3], s[16:17]
+; GFX942-GISEL-NEXT: global_store_dwordx4 v16, v[4:7], s[16:17] offset:16
+; GFX942-GISEL-NEXT: global_store_dwordx4 v16, v[8:11], s[16:17] offset:32
+; GFX942-GISEL-NEXT: global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
+; GFX942-GISEL-NEXT: s_endpgm
+bb:
+ %in.1 = load <16 x float>, ptr addrspace(1) %arg
+ %mai.1 = tail call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4.xf32(<2 x float> <float 1.0, float 2.0>, <2 x float> <float 3.0, float 4.0>, <16 x float> %in.1, i32 1, i32 2, i32 3)
+ store <16 x float> %mai.1, ptr addrspace(1) %arg
+ ret void
+}
+
attributes #0 = { "amdgpu-flat-work-group-size"="1,256" }
+attributes #1 = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-agpr-alloc"="0,0" }
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX942: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 425a853..477f0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -51,7 +51,7 @@ define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(ptr addrspace(1) %out,
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(ptr addrspace(1) %out, float %src) #4 {
- %rcp = fdiv float 1.0, %src, !fpmath !0
+ %rcp = fdiv afn float 1.0, %src, !fpmath !0
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -105,8 +105,8 @@ define amdgpu_kernel void @safe_rsq_rcp_pat_amdgcn_sqrt_f32_nocontract(ptr addrs
; SI: v_sqrt_f32_e32
; SI: v_rcp_f32_e32
define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(ptr addrspace(1) %out, float %src) #2 {
- %sqrt = call float @llvm.sqrt.f32(float %src)
- %rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
+ %sqrt = call afn float @llvm.sqrt.f32(float %src)
+ %rcp = call afn float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -148,7 +148,7 @@ define amdgpu_kernel void @rcp_pat_f64(ptr addrspace(1) %out, double %src) #1 {
; SI: v_fma_f64
; SI: v_fma_f64
define amdgpu_kernel void @unsafe_rcp_pat_f64(ptr addrspace(1) %out, double %src) #2 {
- %rcp = fdiv double 1.0, %src
+ %rcp = fdiv afn double 1.0, %src
store double %rcp, ptr addrspace(1) %out, align 8
ret void
}
@@ -214,9 +214,9 @@ define amdgpu_kernel void @unsafe_amdgcn_sqrt_rsq_rcp_pat_f64(ptr addrspace(1) %
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #3 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll
index ae8ace2..2052347 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll
@@ -6,9 +6,9 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_cluster(ptr ad
; GCN-LABEL: test_sched_group_barrier_pipeline_WMMA_cluster:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 5, v0
; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v40, 5, v0
+; GCN-NEXT: v_and_b32_e32 v40, 0x7fe0, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_nc_u32_e32 v32, s0, v40
; GCN-NEXT: v_dual_mov_b32 v81, s1 :: v_dual_add_nc_u32 v80, s1, v40
@@ -74,9 +74,9 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_cluster(ptr ad
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_WMMA_cluster:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 5, v0
; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v40, 5, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v40, 0x7fe0, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v32, s0, v40
; EXACTCUTOFF-NEXT: v_dual_mov_b32 v81, s1 :: v_dual_add_nc_u32 v80, s1, v40
@@ -178,9 +178,9 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_interleave(ptr
; GCN-LABEL: test_sched_group_barrier_pipeline_WMMA_interleave:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 5, v0
; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v16, 5, v0
+; GCN-NEXT: v_and_b32_e32 v16, 0x7fe0, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_nc_u32_e32 v17, s0, v16
; GCN-NEXT: v_add_nc_u32_e32 v16, s1, v16
@@ -260,9 +260,9 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_interleave(ptr
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_WMMA_interleave:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 5, v0
; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v16, 5, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v16, 0x7fe0, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v17, s0, v16
; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v16, s1, v16
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
index 02e80b6..dcc3e0df 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
@@ -8,10 +8,10 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_cluster(ptr
; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GCN-NEXT: v_mov_b32_e32 v48, 0
; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; GCN-NEXT: v_and_b32_e32 v28, 0x3ff0, v0
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v28
; GCN-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
@@ -60,10 +60,10 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_cluster(ptr
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v48, 0
; EXACTCUTOFF-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v28, 4, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v28, 0x3ff0, v0
; EXACTCUTOFF-NEXT: s_wait_kmcnt 0x0
; EXACTCUTOFF-NEXT: v_add_nc_u32_e32 v0, s0, v28
; EXACTCUTOFF-NEXT: v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll
index 04fcdc6..af26e7a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.iterative.ll
@@ -7,8 +7,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr
; GCN-MINREG-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; GCN-MINREG: ; %bb.0: ; %entry
; GCN-MINREG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-MINREG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-MINREG-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-MINREG-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-MINREG-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-MINREG-NEXT: v_mov_b32_e32 v1, 2.0
; GCN-MINREG-NEXT: s_waitcnt lgkmcnt(0)
@@ -140,8 +140,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr
; GCN-MAXOCC-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; GCN-MAXOCC: ; %bb.0: ; %entry
; GCN-MAXOCC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-MAXOCC-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-MAXOCC-NEXT: v_lshlrev_b32_e32 v1, 7, v0
+; GCN-MAXOCC-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-MAXOCC-NEXT: v_and_b32_e32 v1, 0x1ff80, v0
; GCN-MAXOCC-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-MAXOCC-NEXT: v_mov_b32_e32 v3, 2.0
; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0)
@@ -274,8 +274,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr
; GCN-ILP-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; GCN-ILP: ; %bb.0: ; %entry
; GCN-ILP-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-ILP-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-ILP-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-ILP-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-ILP-NEXT: v_mov_b32_e32 v1, 1.0
; GCN-ILP-NEXT: v_mov_b32_e32 v2, 2.0
; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0)
@@ -469,8 +469,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl
; GCN-MINREG-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave_split_region:
; GCN-MINREG: ; %bb.0: ; %entry
; GCN-MINREG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-MINREG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-MINREG-NEXT: v_lshlrev_b32_e32 v2, 7, v0
+; GCN-MINREG-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-MINREG-NEXT: v_and_b32_e32 v2, 0x1ff80, v0
; GCN-MINREG-NEXT: v_mov_b32_e32 v1, 1.0
; GCN-MINREG-NEXT: v_mov_b32_e32 v0, 2.0
; GCN-MINREG-NEXT: s_waitcnt lgkmcnt(0)
@@ -604,8 +604,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl
; GCN-MAXOCC-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave_split_region:
; GCN-MAXOCC: ; %bb.0: ; %entry
; GCN-MAXOCC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-MAXOCC-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-MAXOCC-NEXT: v_lshlrev_b32_e32 v3, 7, v0
+; GCN-MAXOCC-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-MAXOCC-NEXT: v_and_b32_e32 v3, 0x1ff80, v0
; GCN-MAXOCC-NEXT: v_mov_b32_e32 v1, 1.0
; GCN-MAXOCC-NEXT: v_mov_b32_e32 v2, 2.0
; GCN-MAXOCC-NEXT: s_waitcnt lgkmcnt(0)
@@ -739,8 +739,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave_spl
; GCN-ILP-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave_split_region:
; GCN-ILP: ; %bb.0: ; %entry
; GCN-ILP-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-ILP-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-ILP-NEXT: v_lshlrev_b32_e32 v2, 7, v0
+; GCN-ILP-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-ILP-NEXT: v_and_b32_e32 v2, 0x1ff80, v0
; GCN-ILP-NEXT: v_mov_b32_e32 v0, 1.0
; GCN-ILP-NEXT: v_mov_b32_e32 v1, 2.0
; GCN-ILP-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
index c8552d8..5b877f5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
@@ -621,8 +621,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad
; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v1, s0, v0
; GCN-NEXT: ds_read_b128 a[156:159], v1 offset:112
@@ -728,8 +728,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s0, v0
; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v1 offset:112
@@ -871,8 +871,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr
; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-NEXT: v_mov_b32_e32 v3, 2.0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
@@ -1005,8 +1005,8 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(ptr
; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
; EXACTCUTOFF: ; %bb.0: ; %entry
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 1.0
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v3, 2.0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
@@ -1202,7 +1202,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA
; GCN-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v7, 0x32a5705f
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v4, s0, v3
; GCN-NEXT: v_rndne_f32_e32 v5, v4
@@ -1212,7 +1212,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA
; GCN-NEXT: v_add_f32_e32 v4, v6, v4
; GCN-NEXT: v_exp_f32_e32 v4, v4
; GCN-NEXT: v_cvt_i32_f32_e32 v5, v5
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; GCN-NEXT: v_add_u32_e32 v1, s6, v0
; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:112
; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:96
@@ -1387,7 +1387,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; EXACTCUTOFF-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; EXACTCUTOFF-NEXT: v_mov_b32_e32 v7, 0x32a5705f
-; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
; EXACTCUTOFF-NEXT: v_mul_f32_e32 v4, s0, v3
; EXACTCUTOFF-NEXT: v_rndne_f32_e32 v5, v4
@@ -1397,7 +1397,7 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_interleave_EXP_MFMA
; EXACTCUTOFF-NEXT: v_add_f32_e32 v4, v6, v4
; EXACTCUTOFF-NEXT: v_exp_f32_e32 v4, v4
; EXACTCUTOFF-NEXT: v_cvt_i32_f32_e32 v5, v5
-; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s6, v0
; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:112
; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:96
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
index 8b78c4e68..7d44d91 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
@@ -208,11 +208,11 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
;
; GFX11-LABEL: dpp_test1:
; GFX11: ; %bb.0: ; %bb
-; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: ds_load_b32 v1, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_barrier
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 8c1e166..7151fee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -3227,72 +3227,6 @@ define float @v_exp_f32_fast(float %in) {
ret float %result
}
-define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp.f32(float %in)
- ret float %result
-}
-
define float @v_exp_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index edc505b..918b1b2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -3235,78 +3235,6 @@ define float @v_exp10_f32_fast(float %in) {
ret float %result
}
-define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp10.f32(float %in)
- ret float %result
-}
-
define float @v_exp10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp10_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index 38d1b47..307fa89 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -3076,121 +3076,6 @@ define float @v_log_f32_fast(float %in) {
ret float %result
}
-define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3f317218, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc1b17218, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3f317218, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc1b17218, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3f317218, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log.f32(float %in)
- ret float %result
-}
-
define float @v_log_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 058933f..5278589 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -3076,121 +3076,6 @@ define float @v_log10_f32_fast(float %in) {
ret float %result
}
-define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc11a209b, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3e9a209b, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc11a209b, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3e9a209b, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log10.f32(float %in)
- ret float %result
-}
-
define float @v_log10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log10_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index 1e6b77e..22ebb55 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -77,7 +77,7 @@ define amdgpu_kernel void @copy_flat(ptr nocapture %d, ptr nocapture readonly %s
; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 16
; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1]
+; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
; GFX1250-NEXT: s_cbranch_scc1 .LBB0_2
@@ -490,7 +490,7 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7]
+; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 16, v[0:1]
; GFX1250-NEXT: s_cbranch_scc1 .LBB4_2
diff --git a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
index eb28e6f..05a0b1a 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
@@ -1,9 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -< %s | FileCheck --check-prefixes=GCN,GFX9 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -< %s | FileCheck --check-prefixes=GCN,GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 -amdgpu-enable-delay-alu=0 -< %s | FileCheck --check-prefixes=GCN,GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -mattr=+wavefrontsize64 -< %s | FileCheck --check-prefixes=GCN,GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 -< %s | FileCheck --check-prefixes=GCN,GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefixes=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 < %s | FileCheck --check-prefixes=GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -mattr=+wavefrontsize64 < %s | FileCheck --check-prefixes=GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX1250 %s
define amdgpu_ps float @mad_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GFX9-LABEL: mad_i32_vvv:
@@ -22,6 +23,11 @@ define amdgpu_ps float @mad_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: v_mov_b32_e32 v4, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v4, v3, v[2:3]
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v2
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -35,6 +41,34 @@ define amdgpu_ps float @mad_i32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
; GCN-NEXT: s_add_i32 s0, s0, s2
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: mad_i32_sss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mul_i32 s0, s0, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: mad_i32_sss:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mul_i32 s0, s0, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s2
+; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: mad_i32_sss:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_mul_i32 s0, s0, s1
+; GFX11-NEXT: s_add_i32 s0, s0, s2
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_sss:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_mul_i32 s0, s0, s1
+; GFX1250-NEXT: s_add_co_i32 s0, s0, s2
+; GFX1250-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -58,6 +92,11 @@ define amdgpu_ps float @mad_i32_vvc(i32 %a, i32 %b) {
; GFX11-NEXT: v_mov_b32_e32 v3, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, 42
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, 42
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, 42
%cast = bitcast i32 %add to float
@@ -83,6 +122,11 @@ define amdgpu_ps float @mad_i32_vvi(i32 %a, i32 %b) {
; GFX11-NEXT: v_mov_b32_e32 v3, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, 0x12d687
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvi:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, 0x12d687
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, 1234567
%cast = bitcast i32 %add to float
@@ -108,6 +152,11 @@ define amdgpu_ps float @mad_i32_vvi_neg(i32 %a, i32 %b) {
; GFX11-NEXT: v_mov_b32_e32 v3, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, 0xffffffffffed2979
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvi_neg:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, 0xffed2979
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, -1234567
%cast = bitcast i32 %add to float
@@ -130,6 +179,11 @@ define amdgpu_ps float @mad_i32_vcv(i32 %a, i32 %c) {
; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, 42, v[1:2]
; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vcv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, 42, v1
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, 42
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -152,6 +206,11 @@ define amdgpu_ps float @mad_i32_vcc(i32 %a) {
; GFX11-NEXT: v_mov_b32_e32 v2, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, 42, 43
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vcc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, 42, 43
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, 42
%add = add i32 %mul, 43
%cast = bitcast i32 %add to float
@@ -175,6 +234,11 @@ define amdgpu_ps float @mad_i32_vvs(i32 %a, i32 %b, i32 inreg %c) {
; GFX11-NEXT: v_mov_b32_e32 v3, v0
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, s[0:1]
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvs:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, s0
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -197,6 +261,11 @@ define amdgpu_ps float @mad_i32_vsv(i32 %a, i32 inreg %b, i32 %c) {
; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vsv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, s0, v1
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -219,6 +288,11 @@ define amdgpu_ps float @mad_i32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, v0, v[1:2]
; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_svv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, s0, v0, v1
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -244,6 +318,11 @@ define amdgpu_ps float @mad_i32_vss(i32 %a, i32 inreg %b, i32 inreg %c) {
; GFX11-NEXT: s_mov_b32 s2, s1
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, s0, s[2:3]
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vss:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, v0, s0, s1
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -269,6 +348,11 @@ define amdgpu_ps float @mad_i32_svs(i32 inreg %a, i32 %b, i32 inreg %c) {
; GFX11-NEXT: s_mov_b32 s2, s1
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_svs:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, s0, v0, s1
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -292,6 +376,11 @@ define amdgpu_ps float @mad_i32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, s0, s1, v[0:1]
; GFX11-NEXT: v_mov_b32_e32 v0, v1
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_ssv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mad_u32 v0, s0, s1, v0
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
%cast = bitcast i32 %add to float
@@ -322,6 +411,14 @@ define amdgpu_ps float @mad_i32_vvv_multiuse(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: flat_store_b32 v[0:1], v1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: mad_i32_vvv_multiuse:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v1, v2
+; GFX1250-NEXT: flat_store_b32 v[0:1], v1 scope:SCOPE_SE
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
store i32 %mul, ptr poison
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll
index b9b29b7..fef9a9a 100644
--- a/llvm/test/CodeGen/AMDGPU/max.ll
+++ b/llvm/test/CodeGen/AMDGPU/max.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=amdgcn -mcpu=pitcairn < %s | FileCheck -enable-var-scope -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 --amdgpu-enable-vopd=0 < %s | FileCheck -enable-var-scope -check-prefix=GFX1250 %s
; RUN: llc -mtriple=r600 -mcpu=cypress < %s | FileCheck -enable-var-scope -check-prefix=EG %s
define amdgpu_kernel void @v_test_imax_sge_i32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) nounwind {
@@ -24,6 +25,23 @@ define amdgpu_kernel void @v_test_imax_sge_i32(ptr addrspace(1) %out, ptr addrsp
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_imax_sge_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_imax_sge_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -80,6 +98,26 @@ define amdgpu_kernel void @v_test_imax_sge_v4i32(ptr addrspace(1) %out, ptr addr
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_imax_sge_v4i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v4, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b128 v[0:3], v0, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_i32_e32 v3, s7, v3
+; GFX1250-NEXT: v_max_i32_e32 v2, s6, v2
+; GFX1250-NEXT: v_max_i32_e32 v1, s5, v1
+; GFX1250-NEXT: v_max_i32_e32 v0, s4, v0
+; GFX1250-NEXT: global_store_b128 v4, v[0:3], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_imax_sge_v4i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -127,6 +165,17 @@ define amdgpu_kernel void @s_test_imax_sge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sge_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sge_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -156,6 +205,17 @@ define amdgpu_kernel void @s_test_imax_sge_imm_i32(ptr addrspace(1) %out, i32 %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sge_imm_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, 9
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sge_imm_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -195,6 +255,23 @@ define amdgpu_kernel void @v_test_imax_sge_i8(ptr addrspace(1) %out, ptr addrspa
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_imax_sge_i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_i8 s2, s[2:3], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_i8 s3, s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_imax_sge_i8:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[]
@@ -250,6 +327,17 @@ define amdgpu_kernel void @s_test_imax_sgt_imm_i32(ptr addrspace(1) %out, i32 %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sgt_imm_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, 9
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sgt_imm_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -282,6 +370,18 @@ define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sgt_imm_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, 9
+; GFX1250-NEXT: s_max_i32 s3, s3, 9
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_mov_b32_e32 v1, s3
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sgt_imm_v2i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
@@ -322,6 +422,23 @@ define amdgpu_kernel void @v_test_imax_sgt_i32(ptr addrspace(1) %out, ptr addrsp
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_imax_sgt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_imax_sgt_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -366,6 +483,17 @@ define amdgpu_kernel void @s_test_imax_sgt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sgt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sgt_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -404,6 +532,23 @@ define amdgpu_kernel void @v_test_umax_uge_i32(ptr addrspace(1) %out, ptr addrsp
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_umax_uge_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_u32_e32 v0, s2, v0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_umax_uge_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -448,6 +593,17 @@ define amdgpu_kernel void @s_test_umax_uge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_umax_uge_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_umax_uge_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -483,6 +639,22 @@ define amdgpu_kernel void @s_test_umax_uge_v3i32(ptr addrspace(1) %out, <3 x i32
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_umax_uge_v3i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v3, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_u32 s2, s10, s14
+; GFX1250-NEXT: s_max_u32 s3, s8, s12
+; GFX1250-NEXT: s_max_u32 s4, s9, s13
+; GFX1250-NEXT: v_mov_b32_e32 v0, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s4
+; GFX1250-NEXT: v_mov_b32_e32 v2, s2
+; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_umax_uge_v3i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
@@ -527,6 +699,23 @@ define amdgpu_kernel void @v_test_umax_uge_i8(ptr addrspace(1) %out, ptr addrspa
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_umax_uge_i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_u8 s2, s[2:3], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_u8 s3, s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_umax_uge_i8:
; EG: ; %bb.0:
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
@@ -581,6 +770,22 @@ define amdgpu_kernel void @v_test_umax_ugt_i32(ptr addrspace(1) %out, ptr addrsp
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: v_test_umax_ugt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_u32_e32 v0, s2, v0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: v_test_umax_ugt_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -625,6 +830,17 @@ define amdgpu_kernel void @s_test_umax_ugt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_umax_ugt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_umax_ugt_i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -657,6 +873,18 @@ define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_umax_ugt_imm_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_max_u32 s2, s2, 15
+; GFX1250-NEXT: s_max_u32 s3, s3, 23
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_mov_b32_e32 v1, s3
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_umax_ugt_imm_v2i32:
; EG: ; %bb.0:
; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
@@ -693,6 +921,22 @@ define amdgpu_kernel void @simplify_demanded_bits_test_umax_ugt_i16(ptr addrspac
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: simplify_demanded_bits_test_umax_ugt_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_max_u32 s2, s2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: simplify_demanded_bits_test_umax_ugt_i16:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @10, KC0[], KC1[]
@@ -740,6 +984,22 @@ define amdgpu_kernel void @simplify_demanded_bits_test_max_slt_i16(ptr addrspace
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: simplify_demanded_bits_test_max_slt_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x70
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-NEXT: s_sext_i32_i16 s3, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_max_i32 s2, s2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: simplify_demanded_bits_test_max_slt_i16:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @10, KC0[], KC1[]
@@ -786,6 +1046,22 @@ define amdgpu_kernel void @s_test_imax_sge_i16(ptr addrspace(1) %out, [8 x i32],
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: s_test_imax_sge_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x70
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-NEXT: s_sext_i32_i16 s3, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_max_i32 s2, s3, s2
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: s_test_imax_sge_i16:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @10, KC0[], KC1[]
@@ -844,6 +1120,17 @@ define amdgpu_kernel void @test_umax_ugt_i64(ptr addrspace(1) %out, i64 %a, i64
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: test_umax_ugt_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: test_umax_ugt_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
@@ -886,6 +1173,17 @@ define amdgpu_kernel void @test_umax_uge_i64(ptr addrspace(1) %out, i64 %a, i64
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: test_umax_uge_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: test_umax_uge_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
@@ -928,6 +1226,17 @@ define amdgpu_kernel void @test_imax_sgt_i64(ptr addrspace(1) %out, i64 %a, i64
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: test_imax_sgt_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: test_imax_sgt_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
@@ -970,6 +1279,17 @@ define amdgpu_kernel void @test_imax_sge_i64(ptr addrspace(1) %out, i64 %a, i64
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
+; GFX1250-LABEL: test_imax_sge_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+;
; EG-LABEL: test_imax_sge_i64:
; EG: ; %bb.0:
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.ll b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
index ad12d02..e6fd6ab 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -146,8 +146,8 @@ define void @mubuf_clause(ptr addrspace(5) noalias nocapture readonly %arg, ptr
; GCN-LABEL: mubuf_clause:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_and_b32_e32 v2, 0x3ff, v31
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 4, v2
+; GCN-NEXT: v_lshlrev_b32_e32 v2, 4, v31
+; GCN-NEXT: v_and_b32_e32 v2, 0x3ff0, v2
; GCN-NEXT: v_add_u32_e32 v0, v0, v2
; GCN-NEXT: buffer_load_dword v3, v0, s[0:3], 0 offen offset:12
; GCN-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen offset:8
@@ -205,8 +205,8 @@ define void @mubuf_clause(ptr addrspace(5) noalias nocapture readonly %arg, ptr
; GCN-SCRATCH-LABEL: mubuf_clause:
; GCN-SCRATCH: ; %bb.0: ; %bb
; GCN-SCRATCH-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SCRATCH-NEXT: v_and_b32_e32 v2, 0x3ff, v31
-; GCN-SCRATCH-NEXT: v_lshlrev_b32_e32 v18, 4, v2
+; GCN-SCRATCH-NEXT: v_lshlrev_b32_e32 v2, 4, v31
+; GCN-SCRATCH-NEXT: v_and_b32_e32 v18, 0x3ff0, v2
; GCN-SCRATCH-NEXT: v_add_nc_u32_e32 v0, v0, v18
; GCN-SCRATCH-NEXT: s_clause 0x3
; GCN-SCRATCH-NEXT: scratch_load_dwordx4 v[2:5], v0, off
diff --git a/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll b/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll
new file mode 100644
index 0000000..144cb0d7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=amdgcn -mcpu=fiji %s -o - | FileCheck %s
+
+define i1 @basic_eq_i16_3x5(i16 %arg) {
+; CHECK-LABEL: basic_eq_i16_3x5:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %a = and i16 %arg, 31
+ %sh5 = lshr i16 %arg, 5
+ %b = and i16 %sh5, 31
+ %or = or i16 %a, %b
+ %sh10 = lshr i16 %arg, 10
+ %c = and i16 %sh10, 31
+ %or1 = or i16 %or, %c
+ %cmp = icmp eq i16 %or1, 0
+ ret i1 %cmp
+}
+
+define i1 @basic_eq_i32_3x5(i32 %arg) {
+; CHECK-LABEL: basic_eq_i32_3x5:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %a = and i32 %arg, 31
+ %sh5 = lshr i32 %arg, 5
+ %b = and i32 %sh5, 31
+ %or = or i32 %a, %b
+ %sh10 = lshr i32 %arg, 10
+ %c = and i32 %sh10, 31
+ %or1 = or i32 %or, %c
+ %cmp = icmp eq i32 %or1, 0
+ ret i1 %cmp
+}
+
+define i1 @basic_eq_i64_3x5(i64 %arg) {
+; CHECK-LABEL: basic_eq_i64_3x5:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %a = and i64 %arg, 31
+ %sh5 = lshr i64 %arg, 5
+ %b = and i64 %sh5, 31
+ %or = or i64 %a, %b
+ %sh10 = lshr i64 %arg, 10
+ %c = and i64 %sh10, 31
+ %or1 = or i64 %or, %c
+ %cmp = icmp eq i64 %or1, 0
+ ret i1 %cmp
+}
+
+define i1 @basic_ne_i32_3x5(i32 %arg) {
+; CHECK-LABEL: basic_ne_i32_3x5:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %a = and i32 %arg, 31
+ %sh5 = lshr i32 %arg, 5
+ %b = and i32 %sh5, 31
+ %or = or i32 %a, %b
+ %sh10 = lshr i32 %arg, 10
+ %c = and i32 %sh10, 31
+ %or1 = or i32 %or, %c
+ %cmp = icmp ne i32 %or1, 0
+ ret i1 %cmp
+}
+
+define i1 @eq_i32_3x5_holes_in_mask(i32 %arg) {
+; CHECK-LABEL: eq_i32_3x5_holes_in_mask:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7f9f, v0
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %a = and i32 %arg, 31
+ %sh5 = lshr i32 %arg, 7
+ %b = and i32 %sh5, 31
+ %or = or i32 %a, %b
+ %sh10 = lshr i32 %arg, 10
+ %c = and i32 %sh10, 31
+ %or1 = or i32 %or, %c
+ %cmp = icmp ne i32 %or1, 0
+ ret i1 %cmp
+}
+
+define i1 @eq_i32_3x5_all_shifted(i32 %arg) {
+; CHECK-LABEL: eq_i32_3x5_all_shifted:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7ffc, v0
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %sh2 = lshr i32 %arg, 2
+ %a = and i32 %sh2, 31
+ %sh5 = lshr i32 %arg, 7
+ %b = and i32 %sh5, 31
+ %or = or i32 %a, %b
+ %sh10 = lshr i32 %arg, 10
+ %c = and i32 %sh10, 31
+ %or1 = or i32 %or, %c
+ %cmp = icmp ne i32 %or1, 0
+ ret i1 %cmp
+}
diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index bf2ddc1..721f974 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -6,6 +6,10 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck --check-prefixes=GFX11,GFX11-TRUE16 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck --check-prefixes=GFX11,GFX11-FAKE16 %s
+; TODO: FIXME-TRUE16 - Enable this once fixed: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck --check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; Currently crashing on v_test_imin_slt_i16 with:
+; LLVM ERROR: Cannot select: 0x5f895f65b050: i16,ch = load<(load (s16) from %ir.b.gep, addrspace 1)>
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 -amdgpu-enable-vopd=0 < %s | FileCheck --check-prefixes=GFX1250,GFX1250-FAKE16 %s
define amdgpu_kernel void @v_test_imin_sle_i32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; EG-LABEL: v_test_imin_sle_i32:
@@ -124,6 +128,21 @@ define amdgpu_kernel void @v_test_imin_sle_i32(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_min_i32_e32 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_imin_sle_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i32, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds i32, ptr addrspace(1) %b.ptr, i32 %tid
@@ -206,6 +225,17 @@ define amdgpu_kernel void @s_test_imin_sle_i32(ptr addrspace(1) %out, i32 %a, i3
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, ptr addrspace(1) %out, align 4
@@ -282,6 +312,17 @@ define amdgpu_kernel void @s_test_imin_sle_v1i32(ptr addrspace(1) %out, <1 x i32
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_v1i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <1 x i32> %a, %b
%val = select <1 x i1> %cmp, <1 x i32> %a, <1 x i32> %b
store <1 x i32> %val, ptr addrspace(1) %out
@@ -397,6 +438,24 @@ define amdgpu_kernel void @s_test_imin_sle_v4i32(ptr addrspace(1) %out, <4 x i32
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_v4i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v4, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s11, s15
+; GFX1250-NEXT: s_min_i32 s3, s10, s14
+; GFX1250-NEXT: s_min_i32 s4, s8, s12
+; GFX1250-NEXT: s_min_i32 s5, s9, s13
+; GFX1250-NEXT: v_mov_b32_e32 v0, s4
+; GFX1250-NEXT: v_mov_b32_e32 v1, s5
+; GFX1250-NEXT: v_mov_b32_e32 v2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v3, s2
+; GFX1250-NEXT: global_store_b128 v4, v[0:3], s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <4 x i32> %a, %b
%val = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
store <4 x i32> %val, ptr addrspace(1) %out
@@ -514,6 +573,22 @@ define amdgpu_kernel void @s_test_imin_sle_i8(ptr addrspace(1) %out, [8 x i32],
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b8 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x28
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sext_i32_i8 s2, s2
+; GFX1250-NEXT: s_sext_i32_i8 s3, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_min_i32 s2, s3, s2
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle i8 %a, %b
%val = select i1 %cmp, i8 %a, i8 %b
store i8 %val, ptr addrspace(1) %out
@@ -753,6 +828,42 @@ define amdgpu_kernel void @s_test_imin_sle_v4i8(ptr addrspace(1) %out, [8 x i32]
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_v4i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x28
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sext_i32_i16 s5, s2
+; GFX1250-NEXT: s_sext_i32_i16 s7, s3
+; GFX1250-NEXT: s_ashr_i32 s4, s2, 24
+; GFX1250-NEXT: s_ashr_i32 s6, s3, 24
+; GFX1250-NEXT: s_sext_i32_i8 s8, s3
+; GFX1250-NEXT: s_sext_i32_i8 s9, s2
+; GFX1250-NEXT: s_bfe_i32 s3, s3, 0x80010
+; GFX1250-NEXT: s_bfe_i32 s2, s2, 0x80010
+; GFX1250-NEXT: s_ashr_i32 s7, s7, 8
+; GFX1250-NEXT: s_ashr_i32 s5, s5, 8
+; GFX1250-NEXT: s_min_i32 s8, s9, s8
+; GFX1250-NEXT: s_min_i32 s4, s4, s6
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: s_min_i32 s3, s5, s7
+; GFX1250-NEXT: s_and_b32 s5, s8, 0xff
+; GFX1250-NEXT: s_lshl_b32 s4, s4, 8
+; GFX1250-NEXT: s_lshl_b32 s3, s3, 8
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xff
+; GFX1250-NEXT: s_or_b32 s3, s5, s3
+; GFX1250-NEXT: s_or_b32 s2, s2, s4
+; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX1250-NEXT: s_lshl_b32 s2, s2, 16
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <4 x i8> %a, %b
%val = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
store <4 x i8> %val, ptr addrspace(1) %out
@@ -862,6 +973,15 @@ define amdgpu_kernel void @s_test_imin_sle_v2i16(ptr addrspace(1) %out, <2 x i16
; GFX11-NEXT: v_pk_min_i16 v1, s2, s3
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_min_i16 v1, s2, s3
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <2 x i16> %a, %b
%val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
store <2 x i16> %val, ptr addrspace(1) %out
@@ -998,6 +1118,18 @@ define amdgpu_kernel void @s_test_imin_sle_v4i16(ptr addrspace(1) %out, <4 x i16
; GFX11-NEXT: v_pk_min_i16 v0, s0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[4:5]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_v4i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_min_i16 v1, s1, s3
+; GFX1250-NEXT: v_pk_min_i16 v0, s0, s2
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle <4 x i16> %a, %b
%val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
store <4 x i16> %val, ptr addrspace(1) %out
@@ -1121,6 +1253,21 @@ define amdgpu_kernel void @v_test_imin_slt_i32(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_min_i32_e32 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_imin_slt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i32, ptr addrspace(1) %aptr, i32 %tid
%b.gep = getelementptr inbounds i32, ptr addrspace(1) %bptr, i32 %tid
@@ -1283,6 +1430,21 @@ define amdgpu_kernel void @v_test_imin_slt_i16(ptr addrspace(1) %out, ptr addrsp
; GFX11-FAKE16-NEXT: v_min_i16 v1, v1, v2
; GFX11-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_imin_slt_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_u16 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i16 v1, v1, v2
+; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i16, ptr addrspace(1) %aptr, i32 %tid
%b.gep = getelementptr inbounds i16, ptr addrspace(1) %bptr, i32 %tid
@@ -1366,6 +1528,17 @@ define amdgpu_kernel void @s_test_imin_slt_i32(ptr addrspace(1) %out, i32 %a, i3
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_slt_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp slt i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, ptr addrspace(1) %out, align 4
@@ -1459,6 +1632,20 @@ define amdgpu_kernel void @s_test_imin_slt_v2i32(ptr addrspace(1) %out, <2 x i32
; GFX11-NEXT: v_mov_b32_e32 v1, s1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[4:5]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_slt_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s0, s0, s2
+; GFX1250-NEXT: s_min_i32 s1, s1, s3
+; GFX1250-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-NEXT: v_mov_b32_e32 v1, s1
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp slt <2 x i32> %a, %b
%val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
store <2 x i32> %val, ptr addrspace(1) %out
@@ -1542,6 +1729,17 @@ define amdgpu_kernel void @s_test_imin_slt_imm_i32(ptr addrspace(1) %out, i32 %a
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_slt_imm_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s2, 8
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp slt i32 %a, 8
%val = select i1 %cmp, i32 %a, i32 8
store i32 %val, ptr addrspace(1) %out, align 4
@@ -1625,6 +1823,17 @@ define amdgpu_kernel void @s_test_imin_sle_imm_i32(ptr addrspace(1) %out, i32 %a
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_imm_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_i32 s2, s2, 8
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle i32 %a, 8
%val = select i1 %cmp, i32 %a, i32 8
store i32 %val, ptr addrspace(1) %out, align 4
@@ -1748,6 +1957,21 @@ define amdgpu_kernel void @v_test_umin_ule_i32(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_min_u32_e32 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ule_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i32, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds i32, ptr addrspace(1) %b.ptr, i32 %tid
@@ -1893,6 +2117,25 @@ define amdgpu_kernel void @v_test_umin_ule_v3i32(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: v_min_u32_e32 v0, v0, v3
; GFX11-NEXT: global_store_b96 v6, v[0:2], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ule_v3i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 4, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b96 v[0:2], v3, s[2:3]
+; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[4:5]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX1250-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX1250-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[0:1]
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds <3 x i32>, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds <3 x i32>, ptr addrspace(1) %b.ptr, i32 %tid
@@ -2068,6 +2311,26 @@ define amdgpu_kernel void @v_test_umin_ule_v3i16(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: global_store_b16 v4, v1, s[0:1] offset:4
; GFX11-NEXT: global_store_b32 v4, v0, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ule_v3i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 3, v4
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX1250-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_store_b16 v4, v1, s[0:1] offset:4
+; GFX1250-NEXT: global_store_b32 v4, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds <3 x i16>, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds <3 x i16>, ptr addrspace(1) %b.ptr, i32 %tid
@@ -2151,6 +2414,17 @@ define amdgpu_kernel void @s_test_umin_ule_i32(ptr addrspace(1) %out, i32 %a, i3
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_umin_ule_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp ule i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, ptr addrspace(1) %out, align 4
@@ -2274,6 +2548,21 @@ define amdgpu_kernel void @v_test_umin_ult_i32(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_min_u32_e32 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ult_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i32, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds i32, ptr addrspace(1) %b.ptr, i32 %tid
@@ -2419,6 +2708,21 @@ define amdgpu_kernel void @v_test_umin_ult_i8(ptr addrspace(1) %out, ptr addrspa
; GFX11-FAKE16-NEXT: v_min_u16 v1, v1, v2
; GFX11-FAKE16-NEXT: global_store_b8 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ult_i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_u8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_load_u8 v2, v0, s[4:5]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u16 v1, v1, v2
+; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds i8, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds i8, ptr addrspace(1) %b.ptr, i32 %tid
@@ -2502,6 +2806,17 @@ define amdgpu_kernel void @s_test_umin_ult_i32(ptr addrspace(1) %out, i32 %a, i3
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_umin_ult_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp ult i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, ptr addrspace(1) %out, align 4
@@ -2645,6 +2960,27 @@ define amdgpu_kernel void @v_test_umin_ult_i32_multi_use(ptr addrspace(1) %out0,
; GFX11-NEXT: global_store_b32 v1, v2, s[0:1]
; GFX11-NEXT: global_store_b8 v1, v0, s[2:3]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ult_i32_multi_use:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b32 s5, s[6:7], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_cmp_lt_u32 s4, s5
+; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s6
+; GFX1250-NEXT: s_and_b32 s6, s6, exec_lo
+; GFX1250-NEXT: s_cselect_b32 s4, s4, s5
+; GFX1250-NEXT: v_mov_b32_e32 v2, s4
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_store_b32 v1, v2, s[0:1]
+; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: s_endpgm
%a = load i32, ptr addrspace(1) %aptr, align 4
%b = load i32, ptr addrspace(1) %bptr, align 4
%cmp = icmp ult i32 %a, %b
@@ -2821,6 +3157,27 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0,
; GFX11-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: global_store_b8 v0, v2, s[2:3]
; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_umin_ult_i16_multi_use:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_u16 v1, v0, s[6:7]
+; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5]
+; GFX1250-NEXT: s_wait_loadcnt 0x1
+; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, v4, v3
+; GFX1250-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX1250-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX1250-NEXT: global_store_b8 v0, v2, s[2:3]
+; GFX1250-NEXT: s_endpgm
%a = load i16, ptr addrspace(1) %aptr, align 2
%b = load i16, ptr addrspace(1) %bptr, align 2
%cmp = icmp ult i16 %a, %b
@@ -2900,6 +3257,17 @@ define amdgpu_kernel void @s_test_umin_ult_v1i32(ptr addrspace(1) %out, <1 x i32
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_umin_ult_v1i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_u32 s2, s2, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp ult <1 x i32> %a, %b
%val = select <1 x i1> %cmp, <1 x i32> %a, <1 x i32> %b
store <1 x i32> %val, ptr addrspace(1) %out
@@ -3078,6 +3446,34 @@ define amdgpu_kernel void @s_test_umin_ult_v8i32(ptr addrspace(1) %out, <8 x i32
; GFX11-NEXT: global_store_b128 v8, v[0:3], s[0:1] offset:16
; GFX11-NEXT: global_store_b128 v8, v[4:7], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_umin_ult_v8i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b512 s[8:23], s[4:5], 0x20
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v8, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_min_u32 s4, s9, s17
+; GFX1250-NEXT: s_min_u32 s5, s8, s16
+; GFX1250-NEXT: s_min_u32 s6, s15, s23
+; GFX1250-NEXT: s_min_u32 s7, s14, s22
+; GFX1250-NEXT: s_min_u32 s8, s12, s20
+; GFX1250-NEXT: s_min_u32 s9, s13, s21
+; GFX1250-NEXT: s_min_u32 s2, s11, s19
+; GFX1250-NEXT: s_min_u32 s3, s10, s18
+; GFX1250-NEXT: v_mov_b32_e32 v0, s8
+; GFX1250-NEXT: v_mov_b32_e32 v1, s9
+; GFX1250-NEXT: v_mov_b32_e32 v2, s7
+; GFX1250-NEXT: v_mov_b32_e32 v3, s6
+; GFX1250-NEXT: v_mov_b32_e32 v4, s5
+; GFX1250-NEXT: v_mov_b32_e32 v5, s4
+; GFX1250-NEXT: v_mov_b32_e32 v6, s3
+; GFX1250-NEXT: v_mov_b32_e32 v7, s2
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_store_b128 v8, v[0:3], s[0:1] offset:16
+; GFX1250-NEXT: global_store_b128 v8, v[4:7], s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp ult <8 x i32> %a, %b
%val = select <8 x i1> %cmp, <8 x i32> %a, <8 x i32> %b
store <8 x i32> %val, ptr addrspace(1) %out
@@ -3270,6 +3666,20 @@ define amdgpu_kernel void @s_test_umin_ult_v8i16(ptr addrspace(1) %out, <8 x i16
; GFX11-NEXT: v_pk_min_u16 v0, s8, s12
; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_umin_ult_v8i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x10
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v4, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_min_u16 v3, s11, s15
+; GFX1250-NEXT: v_pk_min_u16 v2, s10, s14
+; GFX1250-NEXT: v_pk_min_u16 v1, s9, s13
+; GFX1250-NEXT: v_pk_min_u16 v0, s8, s12
+; GFX1250-NEXT: global_store_b128 v4, v[0:3], s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp ult <8 x i16> %a, %b
%val = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
store <8 x i16> %val, ptr addrspace(1) %out
@@ -3380,6 +3790,22 @@ define amdgpu_kernel void @simplify_demanded_bits_test_umin_ult_i16(ptr addrspac
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simplify_demanded_bits_test_umin_ult_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x28
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_min_u32 s2, s2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%a.ext = zext i16 %a to i32
%b.ext = zext i16 %b to i32
%cmp = icmp ult i32 %a.ext, %b.ext
@@ -3493,6 +3919,22 @@ define amdgpu_kernel void @simplify_demanded_bits_test_min_slt_i16(ptr addrspace
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simplify_demanded_bits_test_min_slt_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x2
+; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x28
+; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x4c
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-NEXT: s_sext_i32_i16 s3, s3
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%a.ext = sext i16 %a to i32
%b.ext = sext i16 %b to i32
%cmp = icmp slt i32 %a.ext, %b.ext
@@ -3609,6 +4051,19 @@ define amdgpu_kernel void @s_test_imin_sle_i16(ptr addrspace(1) %out, i16 %a, i1
; GFX11-NEXT: v_mov_b32_e32 v1, s2
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: s_test_imin_sle_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_ashr_i32 s3, s2, 16
+; GFX1250-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_min_i32 s2, s2, s3
+; GFX1250-NEXT: v_mov_b32_e32 v1, s2
+; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
%cmp = icmp sle i16 %a, %b
%val = select i1 %cmp, i16 %a, i16 %b
store i16 %val, ptr addrspace(1) %out
@@ -3724,6 +4179,17 @@ define amdgpu_kernel void @test_umin_ult_i64(ptr addrspace(1) %out, i64 %a, i64
; GFX11-NEXT: v_mov_b32_e32 v1, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: test_umin_ult_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
%tmp = icmp ult i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, ptr addrspace(1) %out, align 8
@@ -3837,6 +4303,17 @@ define amdgpu_kernel void @test_umin_ule_i64(ptr addrspace(1) %out, i64 %a, i64
; GFX11-NEXT: v_mov_b32_e32 v1, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: test_umin_ule_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
%tmp = icmp ule i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, ptr addrspace(1) %out, align 8
@@ -3950,6 +4427,17 @@ define amdgpu_kernel void @test_imin_slt_i64(ptr addrspace(1) %out, i64 %a, i64
; GFX11-NEXT: v_mov_b32_e32 v1, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: test_imin_slt_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
%tmp = icmp slt i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, ptr addrspace(1) %out, align 8
@@ -4063,6 +4551,17 @@ define amdgpu_kernel void @test_imin_sle_i64(ptr addrspace(1) %out, i64 %a, i64
; GFX11-NEXT: v_mov_b32_e32 v1, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: test_imin_sle_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
%tmp = icmp sle i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, ptr addrspace(1) %out, align 8
@@ -4214,6 +4713,21 @@ define amdgpu_kernel void @v_test_imin_sle_v2i16(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: v_pk_min_i16 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_imin_sle_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_pk_min_i16 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %b.ptr, i32 %tid
@@ -4369,6 +4883,21 @@ define amdgpu_kernel void @v_test_imin_ule_v2i16(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: v_pk_min_u16 v1, v1, v2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_test_imin_ule_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset
+; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_pk_min_u16 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%a.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %a.ptr, i32 %tid
%b.gep = getelementptr inbounds <2 x i16>, ptr addrspace(1) %b.ptr, i32 %tid
@@ -4385,3 +4914,5 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250-FAKE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 8d3716e..7e3d5c9 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -3548,28 +3548,27 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
; GFX1250-NEXT: s_wait_loadcnt 0x1
; GFX1250-NEXT: v_dual_mov_b32 v11, 0 :: v_dual_mov_b32 v10, v0
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_dual_mov_b32 v9, v11 :: v_dual_mov_b32 v8, v4
; GFX1250-NEXT: v_mul_u64_e32 v[6:7], v[0:1], v[6:7]
-; GFX1250-NEXT: v_mul_lo_u32 v3, v3, v4
; GFX1250-NEXT: v_mul_u64_e32 v[8:9], v[8:9], v[10:11]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[6:7], null, v2, v4, v[6:7]
-; GFX1250-NEXT: v_mul_lo_u32 v2, v2, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[6:7], v2, v4, v[6:7]
; GFX1250-NEXT: v_mov_b32_e32 v10, v9
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1250-NEXT: v_mad_co_u64_u32 v[12:13], null, v5, v0, v[10:11]
-; GFX1250-NEXT: v_add3_u32 v7, v3, v7, v2
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[12:13], v5, v0, v[10:11]
+; GFX1250-NEXT: v_mad_u32 v0, v3, v4, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_dual_mov_b32 v10, v13 :: v_dual_mov_b32 v13, v11
-; GFX1250-NEXT: v_mad_co_u64_u32 v[12:13], null, v4, v1, v[12:13]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mad_u32 v7, v2, v5, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[12:13], v4, v1, v[12:13]
; GFX1250-NEXT: v_dual_mov_b32 v15, v11 :: v_dual_mov_b32 v9, v12
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_mov_b32_e32 v14, v13
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[14:15]
-; GFX1250-NEXT: v_mad_co_u64_u32 v[0:1], null, v5, v1, v[10:11]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v5, v1, v[10:11]
; GFX1250-NEXT: v_add_nc_u64_e32 v[10:11], v[0:1], v[6:7]
; GFX1250-NEXT: global_store_b128 v16, v[8:11], s[2:3] scale_offset
; GFX1250-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420e..9f0ffbc 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -56,7 +56,7 @@ define float @v_rcp_f32_ieee_unsafe(float %x) #4 {
; R600: ; %bb.0:
; R600-NEXT: CF_END
; R600-NEXT: PAD
- %rcp = fdiv float 1.0, %x
+ %rcp = fdiv afn float 1.0, %x
ret float %rcp
}
@@ -1411,10 +1411,10 @@ define amdgpu_kernel void @s_div_arcp_neg_k_x_pat_f32_daz(ptr addrspace(1) %out)
declare float @llvm.fabs.f32(float) #1
declare float @llvm.sqrt.f32(float) #1
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll b/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll
index d89e572..25609e8 100644
--- a/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll
+++ b/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll
@@ -5,6 +5,7 @@
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX900 %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s
; We want to undo these canonicalizations to enable mad matching:
; (x * y) + x --> x * (y + 1)
@@ -36,6 +37,13 @@ define i32 @v_mul_add_1_i32(i32 %x, i32 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 1
%mul = mul i32 %x, %add
ret i32 %mul
@@ -67,6 +75,13 @@ define i32 @v_mul_add_1_i32_commute(i32 %x, i32 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i32_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 1
%mul = mul i32 %add, %x
ret i32 %mul
@@ -98,6 +113,13 @@ define i32 @v_mul_add_x_i32(i32 %x, i32 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i32 %x, %y
%add = add i32 %x, %mul
ret i32 %add
@@ -131,6 +153,15 @@ define i32 @v_mul_sub_1_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, -1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i32 %y, 1
%mul = mul i32 %x, %sub
ret i32 %mul
@@ -164,6 +195,15 @@ define i32 @v_mul_sub_1_i32_commute(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i32_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, -1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i32 %y, 1
%mul = mul i32 %sub, %x
ret i32 %mul
@@ -197,6 +237,15 @@ define i32 @v_mul_sub_x_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_mul_lo_u32 v1, v0, v1
; GFX10-NEXT: v_sub_nc_u32_e32 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i32 %x, %y
%sub = sub i32 %mul, %x
ret i32 %sub
@@ -230,6 +279,15 @@ define i32 @v_mul_add_2_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 2, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 2
%mul = mul i32 %x, %add
ret i32 %mul
@@ -263,6 +321,15 @@ define i32 @v_mul_sub_2_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, -2, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, -2, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i32 %y, 2
%mul = mul i32 %x, %sub
ret i32 %mul
@@ -296,6 +363,15 @@ define i32 @v_mul_add_65_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x41, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_65_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x41, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 65
%mul = mul i32 %x, %add
ret i32 %mul
@@ -329,6 +405,15 @@ define i32 @v_mul_sub_65_i32(i32 %x, i32 %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0xffffffbf, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_65_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0xffffffbf, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i32 %y, 65
%mul = mul i32 %x, %sub
ret i32 %mul
@@ -362,6 +447,15 @@ define i24 @v_mul_add_1_i24_zext(i24 zeroext %x, i24 zeroext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i24_zext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i24 %y, 1
%mul = mul i24 %x, %add
ret i24 %mul
@@ -395,6 +489,15 @@ define i24 @v_mul_sub_1_i24_zext(i24 zeroext %x, i24 zeroext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i24_zext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, -1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i24 %y, 1
%mul = mul i24 %x, %sub
ret i24 %mul
@@ -424,6 +527,13 @@ define i24 @v_add_mul_i24_zext_1(i24 zeroext %x, i24 zeroext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u32_u24 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_add_mul_i24_zext_1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32_u24 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i24 %x, %y
%add = add i24 %mul, %x
ret i24 %add
@@ -457,6 +567,15 @@ define i24 @v_mul_add_1_i24_sext(i24 signext %x, i24 signext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i24_sext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i24 %y, 1
%mul = mul i24 %x, %add
ret i24 %mul
@@ -486,6 +605,13 @@ define i24 @v_add_mul_i24_sext_1(i24 signext %x, i24 signext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u32_u24 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_add_mul_i24_sext_1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32_u24 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i24 %x, %y
%add = add i24 %mul, %x
ret i24 %add
@@ -519,6 +645,15 @@ define i24 @v_mul_sub_1_i24_sext(i24 signext %x, i24 signext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i24_sext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, -1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i24 %y, 1
%mul = mul i24 %x, %sub
ret i24 %mul
@@ -550,6 +685,13 @@ define i25 @v_mul_add_1_i25_zext(i25 zeroext %x, i25 zeroext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i25_zext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i25 %y, 1
%mul = mul i25 %x, %add
ret i25 %mul
@@ -583,6 +725,15 @@ define i25 @v_mul_sub_1_i25_zext(i25 zeroext %x, i25 zeroext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x1ffffff, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i25_zext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x1ffffff, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i25 %y, 1
%mul = mul i25 %x, %sub
ret i25 %mul
@@ -614,6 +765,13 @@ define i25 @v_mul_add_1_i25_sext(i25 signext %x, i25 signext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i25_sext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i25 %y, 1
%mul = mul i25 %x, %add
ret i25 %mul
@@ -647,6 +805,15 @@ define i25 @v_mul_sub_1_i25_sext(i25 signext %x, i25 signext %y) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0x1ffffff, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i25_sext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x1ffffff, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i25 %y, 1
%mul = mul i25 %x, %sub
ret i25 %mul
@@ -679,6 +846,13 @@ define i16 @v_mul_add_1_i16(i16 %x, i16 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i16 %y, 1
%mul = mul i16 %x, %add
ret i16 %mul
@@ -713,6 +887,15 @@ define i32 @v_mul_add_1_i16_zext_result(i16 %x, i16 %y) {
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i16_zext_result:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i16 %y, 1
%mul = mul i16 %x, %add
%zext = zext i16 %mul to i32
@@ -746,6 +929,13 @@ define i16 @v_mul_add_1_i16_commute(i16 %x, i16 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i16_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i16 %y, 1
%mul = mul i16 %add, %x
ret i16 %mul
@@ -777,6 +967,13 @@ define i16 @v_mul_add_x_i16(i16 %x, i16 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i16 %x, %y
%add = add i16 %x, %mul
ret i16 %add
@@ -812,6 +1009,15 @@ define i16 @v_mul_sub_1_i16(i16 %x, i16 %y) {
; GFX10-NEXT: v_add_nc_u16 v1, v1, -1
; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u16 v1, v1, -1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i16 %y, 1
%mul = mul i16 %x, %sub
ret i16 %mul
@@ -847,6 +1053,15 @@ define i16 @v_mul_sub_1_i16_commute(i16 %x, i16 %y) {
; GFX10-NEXT: v_add_nc_u16 v1, v1, -1
; GFX10-NEXT: v_mul_lo_u16 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i16_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u16 v1, v1, -1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u16 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i16 %y, 1
%mul = mul i16 %sub, %x
ret i16 %mul
@@ -882,6 +1097,15 @@ define i16 @v_mul_sub_x_i16(i16 %x, i16 %y) {
; GFX10-NEXT: v_mul_lo_u16 v1, v0, v1
; GFX10-NEXT: v_sub_nc_u16 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u16 v1, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_sub_nc_u16 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i16 %x, %y
%sub = sub i16 %mul, %x
ret i16 %sub
@@ -917,6 +1141,15 @@ define i16 @v_mul_add_2_i16(i16 %x, i16 %y) {
; GFX10-NEXT: v_add_nc_u16 v1, v1, 2
; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u16 v1, v1, 2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i16 %y, 2
%mul = mul i16 %x, %add
ret i16 %mul
@@ -952,6 +1185,15 @@ define i16 @v_mul_sub_2_i16(i16 %x, i16 %y) {
; GFX10-NEXT: v_add_nc_u16 v1, v1, -2
; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u16 v1, v1, -2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i16 %y, 2
%mul = mul i16 %x, %sub
ret i16 %mul
@@ -1012,6 +1254,18 @@ define i64 @v_mul_add_1_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_add3_u32 v1, v1, v5, v0
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[4:5], v0, v2, v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, v1, v2, v5
+; GFX1250-NEXT: v_mad_u32 v1, v0, v3, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v4
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i64 %y, 1
%mul = mul i64 %x, %add
ret i64 %mul
@@ -1072,6 +1326,18 @@ define i64 @v_mul_add_1_i64_commute(i64 %x, i64 %y) {
; GFX10-NEXT: v_add3_u32 v1, v1, v5, v0
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i64_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[4:5], v0, v2, v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, v1, v2, v5
+; GFX1250-NEXT: v_mad_u32 v1, v0, v3, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v4
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i64 %y, 1
%mul = mul i64 %add, %x
ret i64 %mul
@@ -1132,6 +1398,18 @@ define i64 @v_mul_add_x_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_add3_u32 v1, v1, v5, v0
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[4:5], v0, v2, v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, v1, v2, v5
+; GFX1250-NEXT: v_mad_u32 v1, v0, v3, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v4
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %x, %y
%add = add i64 %x, %mul
ret i64 %add
@@ -1198,6 +1476,15 @@ define i64 @v_mul_sub_1_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v2, 0
; GFX10-NEXT: v_add3_u32 v1, v1, v3, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], -1, v[2:3]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i64 %y, 1
%mul = mul i64 %x, %sub
ret i64 %mul
@@ -1264,6 +1551,15 @@ define i64 @v_mul_sub_1_i64_commute(i64 %x, i64 %y) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v2, v0, 0
; GFX10-NEXT: v_add3_u32 v1, v1, v4, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_i64_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], -1, v[2:3]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[2:3], v[0:1]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i64 %y, 1
%mul = mul i64 %sub, %x
ret i64 %mul
@@ -1328,6 +1624,15 @@ define i64 @v_mul_sub_x_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v2, v0
; GFX10-NEXT: v_sub_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_u64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_sub_nc_u64_e32 v[0:1], v[2:3], v[0:1]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %x, %y
%sub = sub i64 %mul, %x
ret i64 %sub
@@ -1394,6 +1699,15 @@ define i64 @v_mul_add_2_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v2, 0
; GFX10-NEXT: v_add3_u32 v1, v1, v3, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 2, v[2:3]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i64 %y, 2
%mul = mul i64 %x, %add
ret i64 %mul
@@ -1460,6 +1774,15 @@ define i64 @v_mul_sub_2_i64(i64 %x, i64 %y) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v2, 0
; GFX10-NEXT: v_add3_u32 v1, v1, v3, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], -2, v[2:3]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub i64 %y, 2
%mul = mul i64 %x, %sub
ret i64 %mul
@@ -1508,6 +1831,14 @@ define <2 x i32> @v_mul_add_1_i32_multiple(i32 %x, i32 %y, i32 %z) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v3, v[0:1]
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v2, v3, v[2:3]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i32_multiple:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: v_mad_u32 v1, v2, v1, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 1
%mul0 = mul i32 %x, %add
%mul1 = mul i32 %z, %add
@@ -1544,6 +1875,15 @@ define <2 x i32> @v_mul_add_1_i32_other_use(i32 %x, i32 %y, i32 %z) {
; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i32_other_use:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 1, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i32 %y, 1
%mul0 = mul i32 %x, %add
%mul1 = mul i32 %z, %add
@@ -1594,6 +1934,19 @@ define i32 @v_mul_add_1_i32_chain(i32 %arg0, i32 %arg1, i32 %arg2) {
; GFX10-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i32_chain:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, 1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v1, v2, v1
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, v1, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%i2 = add i32 %arg0, 1
%i3 = mul i32 %i2, %arg1
%i4 = add i32 %i3, %i2
@@ -1640,6 +1993,15 @@ define <2 x i16> @v_mul_add_1_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i16> %y, <i16 1, i16 1>
%mul = mul <2 x i16> %x, %add
ret <2 x i16> %mul
@@ -1683,6 +2045,15 @@ define <2 x i16> @v_mul_add_1_v2i16_commute(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i16_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i16> %y, <i16 1, i16 1>
%mul = mul <2 x i16> %add, %x
ret <2 x i16> %mul
@@ -1726,6 +2097,13 @@ define <2 x i16> @v_mul_add_x_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i16> %x, %y
%add = add <2 x i16> %x, %mul
ret <2 x i16> %add
@@ -1769,6 +2147,15 @@ define <2 x i16> @v_mul_sub_1_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i16> %y, <i16 1, i16 1>
%mul = mul <2 x i16> %x, %sub
ret <2 x i16> %mul
@@ -1812,6 +2199,15 @@ define <2 x i16> @v_mul_sub_1_v2i16_commute(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i16_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i16> %y, <i16 1, i16 1>
%mul = mul <2 x i16> %sub, %x
ret <2 x i16> %mul
@@ -1858,6 +2254,15 @@ define <2 x i16> @v_mul_sub_x_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_mul_lo_u16 v1, v0, v1
; GFX10-NEXT: v_pk_sub_i16 v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mul_lo_u16 v1, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_sub_i16 v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i16> %x, %y
%sub = sub <2 x i16> %mul, %x
ret <2 x i16> %sub
@@ -1901,6 +2306,15 @@ define <2 x i16> @v_mul_add_2_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i16> %y, <i16 2, i16 2>
%mul = mul <2 x i16> %x, %add
ret <2 x i16> %mul
@@ -1944,6 +2358,15 @@ define <2 x i16> @v_mul_sub_2_v2i16(<2 x i16> %x, <2 x i16> %y) {
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 2 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_sub_i16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i16> %y, <i16 2, i16 2>
%mul = mul <2 x i16> %x, %sub
ret <2 x i16> %mul
@@ -1992,6 +2415,14 @@ define <2 x i32> @v_mul_add_1_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v1, v3, v[1:2]
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v2, v0
+; GFX1250-NEXT: v_mad_u32 v1, v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i32> %y, <i32 1, i32 1>
%mul = mul <2 x i32> %x, %add
ret <2 x i32> %mul
@@ -2040,6 +2471,14 @@ define <2 x i32> @v_mul_add_1_v2i32_commute(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v1, v3, v[1:2]
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i32_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v2, v0
+; GFX1250-NEXT: v_mad_u32 v1, v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i32> %y, <i32 1, i32 1>
%mul = mul <2 x i32> %add, %x
ret <2 x i32> %mul
@@ -2088,6 +2527,14 @@ define <2 x i32> @v_mul_add_x_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v1, v3, v[1:2]
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, v2, v0
+; GFX1250-NEXT: v_mad_u32 v1, v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i32> %x, %y
%add = add <2 x i32> %x, %mul
ret <2 x i32> %add
@@ -2129,6 +2576,16 @@ define <2 x i32> @v_mul_sub_1_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -1, v2 :: v_dual_add_nc_u32 v3, -1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i32> %y, <i32 1, i32 1>
%mul = mul <2 x i32> %x, %sub
ret <2 x i32> %mul
@@ -2170,6 +2627,16 @@ define <2 x i32> @v_mul_sub_1_v2i32_commute(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX10-NEXT: v_mul_lo_u32 v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i32_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -1, v2 :: v_dual_add_nc_u32 v3, -1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v2, v0
+; GFX1250-NEXT: v_mul_lo_u32 v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i32> %y, <i32 1, i32 1>
%mul = mul <2 x i32> %sub, %x
ret <2 x i32> %mul
@@ -2220,6 +2687,16 @@ define <2 x i32> @v_mul_sub_x_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_sub_nc_u32_e32 v0, v2, v0
; GFX10-NEXT: v_sub_nc_u32_e32 v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_lo_u32 v2, v0, v2
+; GFX1250-NEXT: v_mul_lo_u32 v3, v1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_sub_nc_u32 v0, v2, v0 :: v_dual_sub_nc_u32 v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i32> %x, %y
%sub = sub <2 x i32> %mul, %x
ret <2 x i32> %sub
@@ -2261,6 +2738,16 @@ define <2 x i32> @v_mul_add_2_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, 2, v2 :: v_dual_add_nc_u32 v3, 2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i32> %y, <i32 2, i32 2>
%mul = mul <2 x i32> %x, %add
ret <2 x i32> %mul
@@ -2302,6 +2789,16 @@ define <2 x i32> @v_mul_sub_2_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -2, v2 :: v_dual_add_nc_u32 v3, -2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1250-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i32> %y, <i32 2, i32 2>
%mul = mul <2 x i32> %x, %sub
ret <2 x i32> %mul
@@ -2343,6 +2840,16 @@ define <2 x i24> @v_mul_add_1_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v2
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, 1, v2 :: v_dual_add_nc_u32 v3, 1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v2
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i24> %y, <i24 1, i24 1>
%mul = mul <2 x i24> %x, %add
ret <2 x i24> %mul
@@ -2384,6 +2891,16 @@ define <2 x i24> @v_mul_add_1_v2i24_commute(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v2, v0
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i24_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, 1, v2 :: v_dual_add_nc_u32 v3, 1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v2, v0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i24> %y, <i24 1, i24 1>
%mul = mul <2 x i24> %add, %x
ret <2 x i24> %mul
@@ -2417,6 +2934,14 @@ define <2 x i24> @v_mul_add_x_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mad_u32_u24 v0, v0, v2, v0
; GFX10-NEXT: v_mad_u32_u24 v1, v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_x_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32_u24 v0, v0, v2, v0
+; GFX1250-NEXT: v_mad_u32_u24 v1, v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i24> %x, %y
%add = add <2 x i24> %x, %mul
ret <2 x i24> %add
@@ -2458,6 +2983,16 @@ define <2 x i24> @v_mul_sub_1_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v2
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -1, v2 :: v_dual_add_nc_u32 v3, -1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v2
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i24> %y, <i24 1, i24 1>
%mul = mul <2 x i24> %x, %sub
ret <2 x i24> %mul
@@ -2499,6 +3034,16 @@ define <2 x i24> @v_mul_sub_1_v2i24_commute(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v2, v0
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_1_v2i24_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -1, v2 :: v_dual_add_nc_u32 v3, -1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v2, v0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i24> %y, <i24 1, i24 1>
%mul = mul <2 x i24> %sub, %x
ret <2 x i24> %mul
@@ -2540,6 +3085,16 @@ define <2 x i24> @v_mul_sub_x_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_sub_nc_u32_e32 v0, v2, v0
; GFX10-NEXT: v_sub_nc_u32_e32 v1, v3, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_x_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v2, v0, v2
+; GFX1250-NEXT: v_mul_u32_u24_e32 v3, v1, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_sub_nc_u32 v0, v2, v0 :: v_dual_sub_nc_u32 v1, v3, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i24> %x, %y
%sub = sub <2 x i24> %mul, %x
ret <2 x i24> %sub
@@ -2581,6 +3136,16 @@ define <2 x i24> @v_mul_add_2_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v2
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_2_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, 2, v2 :: v_dual_add_nc_u32 v3, 2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v2
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i24> %y, <i24 2, i24 2>
%mul = mul <2 x i24> %x, %add
ret <2 x i24> %mul
@@ -2622,6 +3187,16 @@ define <2 x i24> @v_mul_sub_2_v2i24(<2 x i24> %x, <2 x i24> %y) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v2
; GFX10-NEXT: v_mul_u32_u24_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_sub_2_v2i24:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, -2, v2 :: v_dual_add_nc_u32 v3, -2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v2
+; GFX1250-NEXT: v_mul_u32_u24_e32 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%sub = sub <2 x i24> %y, <i24 2, i24 2>
%mul = mul <2 x i24> %x, %sub
ret <2 x i24> %mul
@@ -2653,6 +3228,13 @@ define i32 @v_mul_9_add_52_i32(i32 %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, 9, 52
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_9_add_52_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, 9, 52
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i32 %arg, 9
%add = add i32 %mul, 52
ret i32 %add
@@ -2683,6 +3265,13 @@ define i16 @v_mul_9_add_52_i16(i16 %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, 9, 52
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_9_add_52_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, 9, 52
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i16 %arg, 9
%add = add i16 %mul, 52
ret i16 %add
@@ -2723,6 +3312,13 @@ define <2 x i16> @v_mul_9_add_52_v2i16(<2 x i16> %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_mad_u16 v0, v0, 9, 52 op_sel_hi:[1,0,0]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_9_add_52_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, 9, 52 op_sel_hi:[1,0,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i16> %arg, <i16 9, i16 9>
%add = add <2 x i16> %mul, <i16 52, i16 52>
ret <2 x i16> %add
@@ -2781,6 +3377,16 @@ define i64 @v_mul_9_add_52_i64(i64 %arg) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, 9, 52
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v2, 9, v[1:2]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_9_add_52_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v0, 9, 52
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, v2, 9, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %arg, 9
%add = add i64 %mul, 52
ret i64 %add
@@ -2812,6 +3418,13 @@ define i32 @v_mul_5_add_1_i32(i32 %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, 5, 1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_5_add_1_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u32 v0, v0, 5, 1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i32 %arg, 5
%add = add i32 %mul, 1
ret i32 %add
@@ -2848,6 +3461,15 @@ define i32 @v_mul_284_add_82_i32(i32 %arg) {
; GFX10-NEXT: s_movk_i32 s4, 0x11c
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, s4, 0x52
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_284_add_82_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_movk_i32 s0, 0x11c
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mad_u32 v0, v0, s0, 0x52
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i32 %arg, 284
%add = add i32 %mul, 82
ret i32 %add
@@ -2878,6 +3500,13 @@ define i16 @v_mul_5_add_1_i16(i16 %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, 5, 1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_5_add_1_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, 5, 1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i16 %arg, 5
%add = add i16 %mul, 1
ret i16 %add
@@ -2915,6 +3544,15 @@ define i16 @v_mul_284_add_82_i16(i16 %arg) {
; GFX10-NEXT: s_movk_i32 s4, 0x11c
; GFX10-NEXT: v_mad_u16 v0, v0, s4, 0x52
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_284_add_82_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_movk_i32 s0, 0x11c
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mad_u16 v0, v0, s0, 0x52
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i16 %arg, 284
%add = add i16 %mul, 82
ret i16 %add
@@ -2955,6 +3593,13 @@ define <2 x i16> @v_mul_5_add_1_v2i16(<2 x i16> %arg) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_mad_u16 v0, v0, 5, 1 op_sel_hi:[1,0,0]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_5_add_1_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, 5, 1 op_sel_hi:[1,0,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i16> %arg, <i16 5, i16 5>
%add = add <2 x i16> %mul, <i16 1, i16 1>
ret <2 x i16> %add
@@ -3002,6 +3647,15 @@ define <2 x i16> @v_mul_284_add_82_v2i16(<2 x i16> %arg) {
; GFX10-NEXT: s_movk_i32 s4, 0x11c
; GFX10-NEXT: v_pk_mad_u16 v0, v0, s4, 0x52 op_sel_hi:[1,0,0]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_284_add_82_v2i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_movk_i32 s0, 0x11c
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_pk_mad_u16 v0, v0, s0, 0x52 op_sel_hi:[1,0,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul <2 x i16> %arg, <i16 284, i16 284>
%add = add <2 x i16> %mul, <i16 82, i16 82>
ret <2 x i16> %add
@@ -3060,6 +3714,16 @@ define i64 @v_mul_5_add_1_i64(i64 %arg) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, 5, 1
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, v2, 5, v[1:2]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_5_add_1_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v0, 5, 1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, v2, 5, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %arg, 5
%add = add i64 %mul, 1
ret i64 %add
@@ -3132,6 +3796,17 @@ define i64 @v_mul_284_add_82_i64(i64 %arg) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, s4, 0x52
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, 0x11c, v2, v[1:2]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_284_add_82_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_movk_i32 s0, 0x11c
+; GFX1250-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v0, s0, 0x52
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, 0x11c, v2, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %arg, 284
%add = add i64 %mul, 82
ret i64 %add
@@ -3204,6 +3879,17 @@ define i64 @v_mul_934584645_add_8234599_i64(i64 %arg) {
; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, s4, 0x7da667
; GFX10-NEXT: v_mad_u64_u32 v[1:2], null, 0x37b4a145, v2, v[1:2]
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, 0x37b4a145
+; GFX1250-NEXT: v_mov_b32_e32 v2, v1
+; GFX1250-NEXT: v_mad_nc_u64_u32 v[0:1], v0, s0, 0x7da667
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mad_u32 v1, 0x37b4a145, v2, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%mul = mul i64 %arg, 934584645
%add = add i64 %mul, 8234599
ret i64 %add
@@ -3394,6 +4080,44 @@ define amdgpu_kernel void @compute_mad(ptr addrspace(4) %i18, ptr addrspace(4) %
; GFX10-NEXT: v_add_co_ci_u32_e64 v2, null, s5, v3, vcc_lo
; GFX10-NEXT: global_store_dword v[1:2], v0, off
; GFX10-NEXT: s_endpgm
+;
+; GFX1250-LABEL: compute_mad:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: s_load_b96 s[8:10], s[4:5], 0x10
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_add_co_i32 s0, s10, 1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mul_lo_u32 v1, s0, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, s0, v1 :: v_dual_add_nc_u32 v1, 1, v1
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_mul_lo_u32 v2, v2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v3, v2, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s2, s[2:3], 0x4
+; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, v3, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, 1, v3
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX1250-NEXT: v_mul_lo_u32 v3, v1, v2
+; GFX1250-NEXT: v_mad_u32 v0, ttmp9, s2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, v3, v2
+; GFX1250-NEXT: v_mul_lo_u32 v2, v2, v1
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-NEXT: v_mad_u32 v3, v2, v3, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 2, s[8:9]
+; GFX1250-NEXT: v_mad_u32 v2, v3, v2, v3
+; GFX1250-NEXT: global_store_b32 v[0:1], v2, off
+; GFX1250-NEXT: s_endpgm
bb:
%i = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
%i2 = add i32 %arg1, 1
@@ -3450,6 +4174,13 @@ define amdgpu_ps i32 @s_mul_add_1_i32(i32 inreg %x, i32 inreg %y) {
; GFX10-NEXT: s_add_i32 s1, s1, 1
; GFX10-NEXT: s_mul_i32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_mul_add_1_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_add_co_i32 s1, s1, 1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_mul_i32 s0, s0, s1
+; GFX1250-NEXT: ; return to shader part epilog
%add = add i32 %y, 1
%mul = mul i32 %x, %add
ret i32 %mul
@@ -3479,6 +4210,13 @@ define amdgpu_ps i32 @s_mul_add_1_i32_commute(i32 inreg %x, i32 inreg %y) {
; GFX10-NEXT: s_add_i32 s1, s1, 1
; GFX10-NEXT: s_mul_i32 s0, s1, s0
; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: s_mul_add_1_i32_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_add_co_i32 s1, s1, 1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_mul_i32 s0, s1, s0
+; GFX1250-NEXT: ; return to shader part epilog
%add = add i32 %y, 1
%mul = mul i32 %add, %x
ret i32 %mul
@@ -3511,6 +4249,13 @@ define i8 @v_mul_add_1_i8(i8 %x, i8 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i8 %y, 1
%mul = mul i8 %x, %add
ret i8 %mul
@@ -3543,6 +4288,13 @@ define i8 @v_mul_add_1_i8_commute(i8 %x, i8 %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i8_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i8 %y, 1
%mul = mul i8 %add, %x
ret i8 %mul
@@ -3574,6 +4326,13 @@ define i8 @v_mul_add_1_i8_zext(i8 zeroext %x, i8 zeroext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i8_zext:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i8 %y, 1
%mul = mul i8 %x, %add
ret i8 %mul
@@ -3605,6 +4364,13 @@ define i8 @v_mul_add_1_i8_zext_commute(i8 zeroext %x, i8 zeroext %y) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mad_u16 v0, v0, v1, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_i8_zext_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v0, v0, v1, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add i8 %y, 1
%mul = mul i8 %add, %x
ret i8 %mul
@@ -3656,6 +4422,18 @@ define <2 x i8> @v_mul_add_1_v2i8(<2 x i8> %x, <2 x i8> %y) {
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v1, v1, v3, v1
+; GFX1250-NEXT: v_mad_u16 v0, v0, v2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_lshlrev_b16 v2, 8, v1
+; GFX1250-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX1250-NEXT: v_bitop3_b16 v0, v0, v2, 0xff bitop3:0xec
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i8> %y, <i8 1, i8 1>
%mul = mul <2 x i8> %x, %add
ret <2 x i8> %mul
@@ -3707,6 +4485,18 @@ define <2 x i8> @v_mul_add_1_v2i8_commute(<2 x i8> %x, <2 x i8> %y) {
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_mul_add_1_v2i8_commute:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mad_u16 v1, v1, v3, v1
+; GFX1250-NEXT: v_mad_u16 v0, v0, v2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_lshlrev_b16 v2, 8, v1
+; GFX1250-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX1250-NEXT: v_bitop3_b16 v0, v0, v2, 0xff bitop3:0xec
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%add = add <2 x i8> %y, <i8 1, i8 1>
%mul = mul <2 x i8> %add, %x
ret <2 x i8> %mul
@@ -3749,6 +4539,17 @@ define i64 @mul_u24_with_uneven_operands(i32 %z) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v1, v0
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_u24_with_uneven_operands:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 1, v0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v1, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%c = and i32 %z, 1
%d = add nuw nsw i32 %c, 1
@@ -3792,6 +4593,17 @@ define i64 @mul_u24_with_uneven_operands_swapped(i32 %z) {
; GFX10-NEXT: v_mul_u32_u24_e32 v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_u24_with_uneven_operands_swapped:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 1, v0
+; GFX1250-NEXT: v_mul_u32_u24_e32 v0, v0, v1
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%c = and i32 %z, 1
%d = add nuw nsw i32 %c, 1
@@ -3836,6 +4648,17 @@ define i64 @mul_i24_with_uneven_operands(i32 %z) {
; GFX10-NEXT: v_mul_i32_i24_e32 v0, v2, v1
; GFX10-NEXT: v_mul_hi_i32_i24_e32 v1, v2, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_i24_with_uneven_operands:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_and_b32_e32 v1, 1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, 1, v1
+; GFX1250-NEXT: v_mul_i32_i24_e32 v0, v2, v1
+; GFX1250-NEXT: v_mul_hi_i32_i24_e32 v1, v2, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%c = and i32 %z, 1
%d = add nuw nsw i32 %c, 1
@@ -3879,6 +4702,17 @@ define i64 @mul_i24_with_uneven_operands_swapped(i32 %z) {
; GFX10-NEXT: v_mul_i32_i24_e32 v0, v1, v2
; GFX10-NEXT: v_mul_hi_i32_i24_e32 v1, v1, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: mul_i24_with_uneven_operands_swapped:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_and_b32_e32 v1, 1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, 1, v1
+; GFX1250-NEXT: v_mul_i32_i24_e32 v0, v1, v2
+; GFX1250-NEXT: v_mul_hi_i32_i24_e32 v1, v1, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
entry:
%c = and i32 %z, 1
%d = add nuw nsw i32 %c, 1
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
index f7e0388..f967e95 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -65,8 +65,8 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) noalias %out, ptr addrspace(
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -103,8 +103,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(ptr addrspace(1) noalias %out, float %va
; GCN-UNSAFE-NEXT: s_mov_b32 s2, -1
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-UNSAFE-NEXT: s_endpgm
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -196,7 +196,7 @@ define amdgpu_kernel void @rsqrt_fmul(ptr addrspace(1) %out, ptr addrspace(1) %i
%x = call contract float @llvm.sqrt.f32(float %a)
%y = fmul contract float %x, %b
- %z = fdiv arcp contract float %c, %y
+ %z = fdiv arcp afn contract float %c, %y
store float %z, ptr addrspace(1) %out.gep
ret void
}
@@ -258,8 +258,8 @@ define amdgpu_kernel void @neg_rsq_f32(ptr addrspace(1) noalias %out, ptr addrsp
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -322,8 +322,8 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -343,8 +343,8 @@ define float @v_neg_rsq_neg_f32(float %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -367,8 +367,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg <2 x float> %val
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -387,8 +387,8 @@ define float @v_neg_rsq_neg_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.neg = fneg float %val0
- %sqrt = call contract float @llvm.sqrt.f32(float %val0.neg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0.neg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -412,8 +412,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32_foldable_user(<2 x float> %val0, <2 x fl
; GCN-IEEE-NEXT: v_mul_f32_e64 v1, -v1, v3
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.fneg = fneg <2 x float> %val0
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -432,8 +432,8 @@ define float @v_neg_rsq_f32(float %val) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -455,8 +455,8 @@ define <2 x float> @v_neg_rsq_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -474,8 +474,8 @@ define float @v_neg_rsq_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -643,8 +643,8 @@ define <2 x float> @v_neg_rsq_v2f32_foldable_user(<2 x float> %val0, <2 x float>
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v0, v0, v2
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v1, v1, v3
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -672,8 +672,8 @@ define float @v_rsq_f32(float %val) {
; GCN-IEEE-SAFE-NEXT: v_cndmask_b32_e64 v1, 0, 12, vcc
; GCN-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v0, v1
; GCN-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -756,9 +756,9 @@ define { float, float } @v_rsq_f32_multi_use(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v2, vcc, 0, v2
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v1, v1, v2
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
%insert.0 = insertvalue { float, float } poison, float %sqrt, 0
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
%insert.1 = insertvalue { float, float } %insert.0, float %div, 1
ret { float, float } %insert.1
}
@@ -838,8 +838,8 @@ define float @v_rsq_f32_missing_contract0(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v0, vcc, 0, v0
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v1, v0
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -855,8 +855,8 @@ define float @v_rsq_f32_missing_contract1(float %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -876,8 +876,8 @@ define float @v_rsq_f32_contractable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -897,8 +897,8 @@ define float @v_rsq_f32_contractable_user_missing_contract0(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -918,8 +918,8 @@ define float @v_rsq_f32_contractable_user_missing_contract1(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd float %div, %val1
ret float %add
}
@@ -953,8 +953,8 @@ define float @v_rsq_f32_known_never_posdenormal(float nofpclass(psub) %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
index b78cbb0..4aac193 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
@@ -4504,7 +4504,7 @@ define <2 x double> @v_rsq_v2f64__afn_nnan_ninf(<2 x double> %x) {
ret <2 x double> %rsq
}
-define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
+define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) {
; SI-SDAG-LABEL: s_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0
@@ -4648,8 +4648,8 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
; VI-GISEL-NEXT: v_readfirstlane_b32 s0, v0
; VI-GISEL-NEXT: v_readfirstlane_b32 s1, v1
; VI-GISEL-NEXT: ; return to shader part epilog
- %rsq = call contract double @llvm.sqrt.f64(double %x)
- %result = fdiv contract double 1.0, %rsq
+ %rsq = call contract afn double @llvm.sqrt.f64(double %x)
+ %result = fdiv contract afn double 1.0, %rsq
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
@@ -4660,7 +4660,7 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
ret <2 x i32> %insert.1
}
-define double @v_rsq_f64_unsafe(double %x) #0 {
+define double @v_rsq_f64_unsafe(double %x) {
; SI-SDAG-LABEL: v_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4800,8 +4800,8 @@ define double @v_rsq_f64_unsafe(double %x) #0 {
; VI-GISEL-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], 1.0
; VI-GISEL-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[2:3]
; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call double @llvm.sqrt.f64(double %x)
- %rsq = fdiv double 1.0, %sqrt
+ %sqrt = call afn contract double @llvm.sqrt.f64(double %x)
+ %rsq = fdiv afn contract double 1.0, %sqrt
ret double %rsq
}
@@ -5737,7 +5737,6 @@ define double @v_div_const_contract_sqrt_f64(double %x) {
ret double %rsq
}
-attributes #0 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
; GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/saddsat.ll b/llvm/test/CodeGen/AMDGPU/saddsat.ll
index 4e27cf2..019eb2c 100644
--- a/llvm/test/CodeGen/AMDGPU/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddsat.ll
@@ -235,7 +235,7 @@ define <3 x i16> @v_saddsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_med3_i32 v3, v2, s4, v4
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_or_b32_e32 v2, 0xffff0000, v3
+; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v3
; GFX6-NEXT: v_alignbit_b32 v1, v3, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 79adc9e..9230174 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -202,10 +202,9 @@ define <3 x i16> @v_uaddsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5
; GFX6-NEXT: v_min_u32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_min_u32_e32 v3, 0xffff, v2
+; GFX6-NEXT: v_min_u32_e32 v2, 0xffff, v2
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_or_b32_e32 v2, 0xffff0000, v3
-; GFX6-NEXT: v_alignbit_b32 v1, v3, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddsat_v3i16:
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
index aea2a8b..f2ecfe8 100644
--- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s
define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_i8_i32:
; GFX950: ; %bb.0:
@@ -13,6 +14,20 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_i8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_i8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
@@ -40,6 +55,20 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_u8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_u8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
index c12265b..ed2f06b 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
@@ -604,18 +604,18 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smax_v8i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX7-SDAG-NEXT: v_max_i32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_max_i32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -698,15 +698,15 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_smax_v8i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_max3_i16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_max3_i16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_max_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -741,20 +741,20 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_smax_v8i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX10-SDAG-NEXT: v_max_i16 v3, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX10-SDAG-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-SDAG-NEXT: v_max_i16 v1, v1, v5
+; GFX10-SDAG-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_max_i16 v2, v2, v3
+; GFX10-SDAG-NEXT: v_max_i16 v0, v0, v3
; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX10-SDAG-NEXT: v_max3_i16 v0, v0, v4, v2
+; GFX10-SDAG-NEXT: v_max3_i16 v0, v0, v2, v4
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_max_i16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -796,62 +796,62 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_smax_v8i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v3, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v1, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v8.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v3.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v1.l, v5.l, v3.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v5.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v7, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v0.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v5, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v6
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v5
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v2.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v2.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_smax_v8i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v3, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v5
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v2, v2, v3
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v3
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v2, v4
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v1
@@ -906,39 +906,39 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v3, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v1, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v8.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v3.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v1.l, v5.l, v3.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v5.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v7, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v0.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v5, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v6
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v5
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v2.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v2.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -949,23 +949,23 @@ define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v3, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v5
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v2, v2, v3
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v3
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v2, v4
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v1
@@ -1025,32 +1025,32 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smax_v16i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX7-SDAG-NEXT: v_max_i32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_max_i32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_max_i32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
; GFX7-SDAG-NEXT: v_max_i32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_max3_i32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_max3_i32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -1165,21 +1165,21 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_smax_v16i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v7, sext(v7), sext(v15) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v5, sext(v5), sext(v13) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX9-SDAG-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_i16 v3, v3, v11, v7
-; GFX9-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v6, sext(v6), sext(v14) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_max_i16_sdwa v4, sext(v4), sext(v12) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v7, sext(v7), sext(v15) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v11) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max3_i16 v1, v1, v5, v13
+; GFX9-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
; GFX9-SDAG-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_i16 v2, v2, v10, v6
+; GFX9-SDAG-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v6, sext(v6), sext(v14) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v10) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max3_i16 v0, v0, v4, v12
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_max3_i16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_max3_i16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_max_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -1222,34 +1222,34 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_smax_v16i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-SDAG-NEXT: v_max_i16 v1, v1, v9
+; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX10-SDAG-NEXT: v_max_i16 v7, v7, v15
+; GFX10-SDAG-NEXT: v_max_i16 v3, v3, v11
+; GFX10-SDAG-NEXT: v_max3_i16 v1, v1, v5, v13
+; GFX10-SDAG-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX10-SDAG-NEXT: v_max_i16 v5, v5, v13
-; GFX10-SDAG-NEXT: v_max_i16 v1, v1, v9
-; GFX10-SDAG-NEXT: v_max3_i16 v3, v3, v11, v7
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX10-SDAG-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX10-SDAG-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX10-SDAG-NEXT: v_max_i16 v5, v6, v7
+; GFX10-SDAG-NEXT: v_max_i16 v0, v0, v5
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_max_i16 v3, v4, v3
-; GFX10-SDAG-NEXT: v_max_i16 v0, v0, v8
-; GFX10-SDAG-NEXT: v_max3_i16 v2, v2, v10, v5
+; GFX10-SDAG-NEXT: v_max_i16 v3, v6, v3
+; GFX10-SDAG-NEXT: v_max_i16 v2, v2, v8
+; GFX10-SDAG-NEXT: v_max3_i16 v0, v0, v4, v9
; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX10-SDAG-NEXT: v_max3_i16 v0, v0, v3, v2
+; GFX10-SDAG-NEXT: v_max3_i16 v0, v0, v2, v3
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_max_i16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -1307,59 +1307,58 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_smax_v16i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v17.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v9.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v15, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v3, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v7, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v14, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v14, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v4, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v17, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v4, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v5, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v1, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v18.l, v2.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v3, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v13.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.l
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v1.h, v4.l, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v11.l, v12.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v9, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v6.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v8, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v17, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.h, v0.h, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v11, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v1.h, v4.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v14.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v10, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.h, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v18, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v9, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v0.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.l
; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 8, v7
+; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v4.l
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v5.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.h, v2.l, v3.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.h, v0.h, v2.l, v3.l
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v6, 0, 8
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.h, v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
@@ -1368,37 +1367,37 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_smax_v16i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v9
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v7, v7, v15
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v3, v3, v11
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v13
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v5, v5, v13
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v9
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v3, v3, v11, v7
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v5, v6, v7
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v5
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v3, v4, v3
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v3, v6, v3
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v8
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v2, v2, v10, v5
+; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v2, v2, v8
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v4, v9
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v3, v2
+; GFX11-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v2, v3
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v1
@@ -1468,59 +1467,58 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v17.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v9.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v15, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v3, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v7, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v14, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v14, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v4, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v17, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v4, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v5, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v1, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v18.l, v2.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v3, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v13.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.l
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v1.h, v4.l, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v11.l, v12.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v9, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.l, v6.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v8, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v17, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v1.h, v0.h, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v11, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v1.h, v4.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v14.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v10, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.h, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v18, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v9, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v0.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.h, v2.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.l
; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 8, v7
+; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v1.l, v1.l, v4.l
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v5.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.h, v2.l, v3.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.h, v0.h, v2.l, v3.l
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v6, 0, 8
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.l, v1.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_max3_i16 v0.l, v0.h, v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_max_i16 v0.l, v0.l, v1.l
@@ -1533,37 +1531,37 @@ define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v9
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v7, v7, v15
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v3, v3, v11
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v13
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v5, v5, v13
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v1, v1, v9
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v3, v3, v11, v7
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v5, v6, v7
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v5
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v3, v4, v3
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v3, v6, v3
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v8
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v2, v2, v10, v5
+; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v2, v2, v8
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v4, v9
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v3, v2
+; GFX12-SDAG-FAKE16-NEXT: v_max3_i16 v0, v0, v2, v3
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_max_i16 v0, v0, v1
@@ -2055,18 +2053,18 @@ define i16 @test_vector_reduce_smax_v8i16(<8 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smax_v8i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-SDAG-NEXT: v_max_i32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_max_i32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -2253,32 +2251,32 @@ define i16 @test_vector_reduce_smax_v16i16(<16 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smax_v16i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-SDAG-NEXT: v_max_i32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_max_i32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_max_i32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX7-SDAG-NEXT: v_max_i32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_max3_i32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_max3_i32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_max3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
index 5056747..8812cae 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
@@ -604,18 +604,18 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smin_v8i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX7-SDAG-NEXT: v_min_i32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_min_i32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -698,15 +698,15 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_smin_v8i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v7) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_min3_i16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_min3_i16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_min_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -741,20 +741,20 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_smin_v8i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX10-SDAG-NEXT: v_min_i16 v3, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX10-SDAG-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-SDAG-NEXT: v_min_i16 v1, v1, v5
+; GFX10-SDAG-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_min_i16 v2, v2, v3
+; GFX10-SDAG-NEXT: v_min_i16 v0, v0, v3
; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX10-SDAG-NEXT: v_min3_i16 v0, v0, v4, v2
+; GFX10-SDAG-NEXT: v_min3_i16 v0, v0, v2, v4
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_min_i16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -796,62 +796,62 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_smin_v8i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v3, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v1, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v8.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v3.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v1.l, v5.l, v3.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v5.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v7, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v0.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v5, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v6
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v5
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v2.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v2.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_smin_v8i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v3, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v5
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v2, v2, v3
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v3
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v2, v4
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v1
@@ -906,39 +906,39 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v3, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v1, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v8.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v3.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v1.l, v5.l, v3.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v5.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v7, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v0.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v5, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v6
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v5
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v4, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v2.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v2.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -949,23 +949,23 @@ define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v3, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v6, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v5
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v4, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v6, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v2, v2, v3
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v3
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v2, v4
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v1
@@ -1025,32 +1025,32 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smin_v16i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 8
; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX7-SDAG-NEXT: v_min_i32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_min_i32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_min_i32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 8
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
; GFX7-SDAG-NEXT: v_min_i32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_min3_i32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_min3_i32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -1165,21 +1165,21 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_smin_v16i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v7, sext(v7), sext(v15) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v5, sext(v5), sext(v13) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
; GFX9-SDAG-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_i16 v3, v3, v11, v7
-; GFX9-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX9-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v6, sext(v6), sext(v14) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_min_i16_sdwa v4, sext(v4), sext(v12) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v7, sext(v7), sext(v15) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v11) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min3_i16 v1, v1, v5, v13
+; GFX9-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX9-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
; GFX9-SDAG-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_i16 v2, v2, v10, v6
+; GFX9-SDAG-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v6, sext(v6), sext(v14) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v10) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min3_i16 v0, v0, v4, v12
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_min3_i16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_min3_i16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_min_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -1222,34 +1222,34 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_smin_v16i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-SDAG-NEXT: v_min_i16 v1, v1, v9
+; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX10-SDAG-NEXT: v_min_i16 v7, v7, v15
+; GFX10-SDAG-NEXT: v_min_i16 v3, v3, v11
+; GFX10-SDAG-NEXT: v_min3_i16 v1, v1, v5, v13
+; GFX10-SDAG-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-SDAG-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX10-SDAG-NEXT: v_min_i16 v5, v5, v13
-; GFX10-SDAG-NEXT: v_min_i16 v1, v1, v9
-; GFX10-SDAG-NEXT: v_min3_i16 v3, v3, v11, v7
-; GFX10-SDAG-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX10-SDAG-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX10-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX10-SDAG-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX10-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX10-SDAG-NEXT: v_min_i16 v5, v6, v7
+; GFX10-SDAG-NEXT: v_min_i16 v0, v0, v5
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_min_i16 v3, v4, v3
-; GFX10-SDAG-NEXT: v_min_i16 v0, v0, v8
-; GFX10-SDAG-NEXT: v_min3_i16 v2, v2, v10, v5
+; GFX10-SDAG-NEXT: v_min_i16 v3, v6, v3
+; GFX10-SDAG-NEXT: v_min_i16 v2, v2, v8
+; GFX10-SDAG-NEXT: v_min3_i16 v0, v0, v4, v9
; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX10-SDAG-NEXT: v_min3_i16 v0, v0, v3, v2
+; GFX10-SDAG-NEXT: v_min3_i16 v0, v0, v2, v3
; GFX10-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX10-SDAG-NEXT: v_min_i16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -1307,59 +1307,58 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_smin_v16i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v17.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v9.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v15, v2, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v3, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v7, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v14, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v14, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v4, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v17, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v4, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v5, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v6, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v1, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v18.l, v2.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v0, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v3, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v13.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.l
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v1.h, v4.l, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v11.l, v12.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v9, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v6.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v8, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v17, 0, 8
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.h, v0.h, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v11, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v1.h, v4.l, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v14.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v10, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.h, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v18, 0, 8
+; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v9, 0, 8
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v0.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v4.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.l
; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 8, v7
+; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v4.l
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v5.l, v0.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.h, v2.l, v3.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.h, v0.h, v2.l, v3.l
; GFX11-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v6, 0, 8
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.h, v0.l, v1.l
; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
@@ -1368,37 +1367,37 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_smin_v16i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v9
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v7, v7, v15
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v3, v3, v11
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v13
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v5, v5, v13
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v9
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v3, v3, v11, v7
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v5, v6, v7
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v5
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v3, v4, v3
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v3, v6, v3
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v8
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v2, v2, v10, v5
+; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v2, v2, v8
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v4, v9
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v3, v2
+; GFX11-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v2, v3
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX11-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v1
@@ -1468,59 +1467,58 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v17.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v9.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v15, v2, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v3, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v3, v7, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v14, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v14, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v16, v4, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v17, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v4, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v5, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v6, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v1, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v10, v10, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v13, v13, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v18.l, v2.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v0, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v11, v3, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v2, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v13.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.l
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v1.h, v4.l, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v11.l, v12.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v9, v9, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.l, v6.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v8, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v17, 0, 8
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v1.h, v0.h, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v11, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.l
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v1.h, v4.l, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v8, v8, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v9.l, v14.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v6, v10, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.h, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.l
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v5, v18, 0, 8
+; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v4, v9, 0, 8
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v0.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v4.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.h, v2.l, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v16.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.l
; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 8, v7
+; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v1.l, v1.l, v4.l
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v5.l, v0.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.h, v2.l, v3.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.h, v0.h, v2.l, v3.l
; GFX12-SDAG-TRUE16-NEXT: v_bfe_i32 v2, v6, 0, 8
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.l, v1.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_min3_i16 v0.l, v0.h, v0.l, v1.l
; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_min_i16 v0.l, v0.l, v1.l
@@ -1533,37 +1531,37 @@ define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v9, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v13, v13, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v5, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v15, v15, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v9
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v9, v12, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v7, v7, v15
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v3, v3, v11
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v13
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v10, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v6, v6, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v10, v10, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v5, v5, v13
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v1, v1, v9
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v3, v3, v11, v7
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v7, v14, 0, 8
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v14, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v8, v8, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v3, v12, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v5, v6, v7
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v5
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v3, v4, v3
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v3, v6, v3
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v8
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v2, v2, v10, v5
+; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v2, v2, v8
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v4, v9
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v3, v2
+; GFX12-SDAG-FAKE16-NEXT: v_min3_i16 v0, v0, v2, v3
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8
; GFX12-SDAG-FAKE16-NEXT: v_min_i16 v0, v0, v1
@@ -2055,18 +2053,18 @@ define i16 @test_vector_reduce_smin_v8i16(<8 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smin_v8i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-SDAG-NEXT: v_min_i32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_min_i32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -2253,32 +2251,32 @@ define i16 @test_vector_reduce_smin_v16i16(<16 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_smin_v16i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-SDAG-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v12, v12, 0, 16
; GFX7-SDAG-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-SDAG-NEXT: v_min_i32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_min_i32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_min_i32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_bfe_i32 v11, v11, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v10, v10, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX7-SDAG-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX7-SDAG-NEXT: v_min_i32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_min3_i32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_min3_i32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_min3_i32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_i32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_i32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
index ddae1b2..82eb122 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
@@ -320,7 +320,7 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX8-SDAG-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX8-SDAG-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX8-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX8-SDAG-NEXT: v_max_u16_e32 v0, v0, v1
+; GFX8-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-GISEL-LABEL: test_vector_reduce_umax_v4i8:
@@ -351,8 +351,9 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-SDAG-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v2, v1
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -387,9 +388,9 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX10-SDAG-NEXT: v_max_u16 v1, v1, v3
-; GFX10-SDAG-NEXT: v_mov_b32_e32 v3, 8
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_lshrrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v2, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -429,8 +430,8 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v0.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -446,8 +447,8 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v3
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v1
; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -500,8 +501,8 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v0.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -521,8 +522,8 @@ define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v3
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v1
; GFX12-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -572,18 +573,18 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umax_v8i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX7-SDAG-NEXT: v_max_u32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_max_u32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -628,7 +629,7 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX8-SDAG-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-SDAG-NEXT: v_max_u16_e32 v0, v0, v2
; GFX8-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX8-SDAG-NEXT: v_max_u16_e32 v0, v0, v1
+; GFX8-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-GISEL-LABEL: test_vector_reduce_umax_v8i8:
@@ -660,17 +661,17 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_umax_v8i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX9-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_u16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX9-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max3_u16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_max_u16_e32 v0, v0, v1
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-GISEL-LABEL: test_vector_reduce_umax_v8i8:
@@ -702,21 +703,21 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_umax_v8i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX10-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX10-SDAG-NEXT: v_max_u16 v3, v3, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX10-SDAG-NEXT: v_max_u16 v2, v2, v6
-; GFX10-SDAG-NEXT: v_max3_u16 v1, v1, v5, v3
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: v_max_u16 v1, v1, v5
+; GFX10-SDAG-NEXT: v_max3_u16 v1, v1, v3, v7
; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v6
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v3, v2
-; GFX10-SDAG-NEXT: v_lshrrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-SDAG-NEXT: v_max_u16 v0, v0, v3
+; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v2, v4
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-SDAG-NEXT: v_max_u16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -756,50 +757,49 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_umax_v8i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v5.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v3.l, v1.h
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v1.l, v1.l, v3.h, v1.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v1.h, v0.h
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v1.l, v3.l, v3.h
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.l, v0.h
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v3
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.h, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v0.h
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_umax_v8i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v3, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v6
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v5
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v6
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v2, v2, v3
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v3
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v4
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -852,27 +852,26 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v5.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v3.l, v1.h
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v1.l, v1.l, v3.h, v1.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v1.h, v0.h
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v1.l, v3.l, v3.h
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.l, v0.h
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v3
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.h, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v0.h
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_umax_v8i8:
@@ -882,24 +881,24 @@ define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v3, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v6
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v5
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v6
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v2, v2, v3
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v3
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v4
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v1
; GFX12-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -957,32 +956,32 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umax_v16i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v9, 0xff, v9
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX7-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX7-SDAG-NEXT: v_max_u32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_max_u32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_max_u32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_max3_u32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_max3_u32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -1051,9 +1050,8 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX8-SDAG-NEXT: v_max_u16_e32 v0, v0, v2
; GFX8-SDAG-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-SDAG-NEXT: v_mov_b32_e32 v1, 8
-; GFX8-SDAG-NEXT: v_lshrrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX8-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v0
+; GFX8-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-GISEL-LABEL: test_vector_reduce_umax_v16i8:
@@ -1093,25 +1091,24 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_umax_v16i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX9-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v5, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX9-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX9-SDAG-NEXT: v_max_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v6, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_u16 v3, v3, v11, v7
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v4, v4, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX9-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v3, v3, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_max_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_max3_u16 v2, v2, v10, v6
-; GFX9-SDAG-NEXT: v_max3_u16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_max3_u16 v1, v1, v5, v13
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v6, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v4, v12
+; GFX9-SDAG-NEXT: v_max3_u16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_max3_u16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 8
-; GFX9-SDAG-NEXT: v_lshrrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX9-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v0
+; GFX9-SDAG-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-GISEL-LABEL: test_vector_reduce_umax_v16i8:
@@ -1151,38 +1148,38 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_umax_v16i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX10-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX10-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX10-SDAG-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX10-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX10-SDAG-NEXT: v_max_u16 v7, v7, v15
-; GFX10-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX10-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-SDAG-NEXT: v_max_u16 v1, v1, v9
; GFX10-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX10-SDAG-NEXT: v_max_u16 v5, v5, v13
-; GFX10-SDAG-NEXT: v_max_u16 v1, v1, v9
-; GFX10-SDAG-NEXT: v_max_u16 v6, v6, v14
-; GFX10-SDAG-NEXT: v_max3_u16 v3, v3, v11, v7
-; GFX10-SDAG-NEXT: v_max_u16 v4, v4, v12
+; GFX10-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-SDAG-NEXT: v_max_u16 v7, v7, v15
+; GFX10-SDAG-NEXT: v_max_u16 v3, v3, v11
; GFX10-SDAG-NEXT: v_max_u16 v0, v0, v8
-; GFX10-SDAG-NEXT: v_max3_u16 v2, v2, v10, v6
-; GFX10-SDAG-NEXT: v_max3_u16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX10-SDAG-NEXT: v_max3_u16 v1, v1, v5, v13
+; GFX10-SDAG-NEXT: v_max_u16 v5, v6, v14
+; GFX10-SDAG-NEXT: v_max_u16 v2, v2, v10
+; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v4, v12
+; GFX10-SDAG-NEXT: v_max3_u16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_max3_u16 v0, v0, v2, v5
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX10-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, 8
-; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v0
-; GFX10-SDAG-NEXT: v_lshrrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-SDAG-NEXT: v_max_u16 v0, v2, v0
+; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v0
+; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-SDAG-NEXT: v_max_u16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-GISEL-LABEL: test_vector_reduce_umax_v16i8:
@@ -1237,84 +1234,82 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_umax_v16i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v14.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v9.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v13.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v13.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v0.h, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v8.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v0.h, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v12.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v4.h, v5.l, v4.h
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v6.l, v6.h
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v3.l, v3.h, v0.h
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v3.l, v4.l, v5.h
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v10.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v3.h, v6.h, v3.h
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v2.h, v3.l, v2.h
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v0.h, v5.l, v5.h
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v6.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v2.l, v1.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v1.h, v2.l, v2.h, v1.h
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v1.l, v4.h, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v4.l, v4.h
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v0.h, v2.h, v3.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v3.l, v1.h
+; GFX11-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v0.h
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_umax_v16i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v7, v7, v15
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v9
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v5, v5, v13
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v9
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v6, v6, v14
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v3, v3, v11, v7
-; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v4, v4, v12
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v7, v7, v15
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v3, v3, v11
; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v8
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v13
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v5, v6, v14
+; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v2, v2, v10
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v2, v2, v10, v6
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v3
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v12
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v3, v7
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v5
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -1382,44 +1377,42 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v14.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v9.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v13.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v13.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v0.h, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v8.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.h, v0.h, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v12.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v4.h, v5.l, v4.h
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v1.l, v1.h
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v6.l, v6.h
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v3.l, v3.h, v0.h
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v3.l, v4.l, v5.h
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v10.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v3.h, v6.h, v3.h
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v2.h, v3.l, v2.h
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v0.h, v5.l, v5.h
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.l, v6.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v1.h, v2.l, v1.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v1.h, v2.l, v2.h, v1.h
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v1.l, v4.h, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v4.l, v4.h
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.h, v0.h, v2.h, v3.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v3.l, v1.h
+; GFX12-SDAG-TRUE16-NEXT: v_max3_u16 v0.l, v0.l, v1.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_max_u16 v0.l, v0.l, v0.h
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_umax_v16i8:
@@ -1429,41 +1422,41 @@ define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v7, v7, v15
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v9
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v5, v5, v13
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v1, v1, v9
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v6, v6, v14
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v3, v3, v11, v7
-; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v4, v4, v12
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v7, v7, v15
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v3, v3, v11
; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v8
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v13
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v5, v6, v14
+; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v2, v2, v10
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v2, v2, v10, v6
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v5, v3
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v12
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v1, v1, v3, v7
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: v_max3_u16 v0, v0, v2, v5
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX12-SDAG-FAKE16-NEXT: v_max_u16 v0, v0, v1
; GFX12-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -1940,18 +1933,18 @@ define i16 @test_vector_reduce_umax_v8i16(<8 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umax_v8i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX7-SDAG-NEXT: v_max_u32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_max_u32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -2136,32 +2129,32 @@ define i16 @test_vector_reduce_umax_v16i16(<16 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umax_v16i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX7-SDAG-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX7-SDAG-NEXT: v_max_u32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_max_u32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_max_u32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_max3_u32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_max3_u32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_max3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_max3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_max_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
index e3a7ae5..115b05a 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
@@ -485,18 +485,18 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umin_v8i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX7-SDAG-NEXT: v_min_u32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_min_u32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -549,15 +549,15 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_umin_v8i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX9-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX9-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; GFX9-SDAG-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_min3_u16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_min_u16_e32 v0, v0, v1
; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -578,20 +578,20 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_umin_v8i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX10-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX10-SDAG-NEXT: v_min_u16 v3, v3, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX10-SDAG-NEXT: v_min_u16 v2, v2, v6
-; GFX10-SDAG-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX10-SDAG-NEXT: v_min_u16 v1, v1, v5
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: v_min_u16 v0, v0, v4
+; GFX10-SDAG-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v6
; GFX10-SDAG-NEXT: v_mov_b32_e32 v4, 8
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX10-SDAG-NEXT: v_min3_u16 v0, v0, v3, v2
+; GFX10-SDAG-NEXT: v_min3_u16 v0, v0, v2, v3
; GFX10-SDAG-NEXT: v_lshrrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX10-SDAG-NEXT: v_min_u16 v0, v0, v1
; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -620,24 +620,24 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_umin_v8i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v5.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v5.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v3.l, v1.h
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v1.l, v1.l, v3.h, v1.h
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v1.l, v1.h
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v1.l, v1.l, v3.l, v3.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v1.h, v0.h
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v2
; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v1.l
@@ -646,23 +646,23 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_umin_v8i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v3, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v6
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v5
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v6
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v2, v2, v3
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v3
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v2, v4
; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v1
@@ -699,24 +699,24 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v5.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v5.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v3.l, v1.h
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v1.l, v1.l, v3.h, v1.h
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v1.l, v1.h
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v1.l, v1.l, v3.l, v3.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v1.h, v0.h
-; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v2
; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v1.l
@@ -729,23 +729,23 @@ define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v3, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v6
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v5
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v6
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
-; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v2, v2, v3
+; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v3
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v2, v4
; GFX12-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v1
@@ -787,32 +787,32 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umin_v16i8:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v9, 0xff, v9
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX7-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX7-SDAG-NEXT: v_min_u32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_min_u32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_min_u32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_min3_u32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_min3_u32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -899,20 +899,20 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX9-SDAG-LABEL: test_vector_reduce_umin_v16i8:
; GFX9-SDAG: ; %bb.0: ; %entry
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX9-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v5, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX9-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX9-SDAG-NEXT: v_min_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v6, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_u16 v3, v3, v11, v7
-; GFX9-SDAG-NEXT: v_min_u16_sdwa v4, v4, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX9-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v3, v3, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
; GFX9-SDAG-NEXT: v_min_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
-; GFX9-SDAG-NEXT: v_min3_u16 v2, v2, v10, v6
-; GFX9-SDAG-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX9-SDAG-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX9-SDAG-NEXT: v_min3_u16 v1, v1, v5, v13
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v6, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min_u16_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-SDAG-NEXT: v_min3_u16 v0, v0, v4, v12
+; GFX9-SDAG-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX9-SDAG-NEXT: v_min3_u16 v0, v0, v2, v6
; GFX9-SDAG-NEXT: v_lshlrev_b16_e32 v1, 8, v1
; GFX9-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 8
@@ -944,32 +944,32 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX10-SDAG-LABEL: test_vector_reduce_umin_v16i8:
; GFX10-SDAG: ; %bb.0: ; %entry
; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX10-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX10-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX10-SDAG-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX10-SDAG-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX10-SDAG-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX10-SDAG-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX10-SDAG-NEXT: v_min_u16 v7, v7, v15
-; GFX10-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX10-SDAG-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX10-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-SDAG-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-SDAG-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-SDAG-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-SDAG-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-SDAG-NEXT: v_min_u16 v1, v1, v9
; GFX10-SDAG-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX10-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX10-SDAG-NEXT: v_min_u16 v5, v5, v13
-; GFX10-SDAG-NEXT: v_min_u16 v1, v1, v9
-; GFX10-SDAG-NEXT: v_min_u16 v6, v6, v14
-; GFX10-SDAG-NEXT: v_min3_u16 v3, v3, v11, v7
-; GFX10-SDAG-NEXT: v_min_u16 v4, v4, v12
+; GFX10-SDAG-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-SDAG-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-SDAG-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-SDAG-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-SDAG-NEXT: v_min_u16 v7, v7, v15
+; GFX10-SDAG-NEXT: v_min_u16 v3, v3, v11
; GFX10-SDAG-NEXT: v_min_u16 v0, v0, v8
-; GFX10-SDAG-NEXT: v_min3_u16 v2, v2, v10, v6
-; GFX10-SDAG-NEXT: v_min3_u16 v1, v1, v5, v3
-; GFX10-SDAG-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX10-SDAG-NEXT: v_min3_u16 v1, v1, v5, v13
+; GFX10-SDAG-NEXT: v_min_u16 v5, v6, v14
+; GFX10-SDAG-NEXT: v_min_u16 v2, v2, v10
+; GFX10-SDAG-NEXT: v_min3_u16 v0, v0, v4, v12
+; GFX10-SDAG-NEXT: v_min3_u16 v1, v1, v3, v7
+; GFX10-SDAG-NEXT: v_min3_u16 v0, v0, v2, v5
; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX10-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-SDAG-NEXT: v_mov_b32_e32 v1, 8
@@ -1018,34 +1018,34 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_umin_v16i8:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v15.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v14.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v9.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v13.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v13.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v0.h, v10.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v8.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v11.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v0.h, v9.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v12.l
; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v4.h, v5.l, v4.h
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v1.l, v1.h
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v6.l, v6.h
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v3.l, v3.h, v0.h
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v3.l, v4.l, v5.h
-; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v10.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v3.h, v6.h, v3.h
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v2.h, v3.l, v2.h
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v0.h, v5.l, v5.h
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v6.l, v7.l
+; GFX11-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v2.l, v1.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v1.h, v2.l, v2.h, v1.h
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v1.l, v4.h, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v4.l, v4.h
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v0.h, v2.h, v3.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v3.l, v1.h
+; GFX11-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
@@ -1061,34 +1061,34 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_umin_v16i8:
; GFX11-SDAG-FAKE16: ; %bb.0: ; %entry
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v7, v7, v15
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v9
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v5, v5, v13
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v9
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v6, v6, v14
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v3, v3, v11, v7
-; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v4, v4, v12
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v7, v7, v15
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v3, v3, v11
; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v8
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v13
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v5, v6, v14
+; GFX11-SDAG-FAKE16-NEXT: v_min_u16 v2, v2, v10
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v2, v2, v10, v6
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v3
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v12
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v3, v7
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX11-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v2, v5
; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1147,34 +1147,34 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v15.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v14.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v9.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v13.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v13.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v0.h, v10.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v8.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v11.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.h, v0.h, v9.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v12.l
; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v4.h, v5.l, v4.h
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v1.l, v1.h
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v6.l, v6.h
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v3.l, v3.h, v0.h
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v3.l, v4.l, v5.h
-; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v10.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v3.h, v6.h, v3.h
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v2.h, v3.l, v2.h
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v0.l, v0.l, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v0.h, v5.l, v5.h
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.l, v6.l, v7.l
+; GFX12-SDAG-TRUE16-NEXT: v_min_u16 v1.h, v2.l, v1.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v1.h, v2.l, v2.h, v1.h
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v1.l, v4.h, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v4.l, v4.h
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.h, v0.h, v2.h, v3.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v3.l, v1.h
+; GFX12-SDAG-TRUE16-NEXT: v_min3_u16 v0.l, v0.l, v1.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v0.h
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v0.h
@@ -1194,34 +1194,34 @@ define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
; GFX12-SDAG-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v7, v7, v15
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
-; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v8
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v9
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v12, 0xff, v12
; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v5, v5, v13
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v1, v1, v9
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v6, v6, v14
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v3, v3, v11, v7
-; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v4, v4, v12
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-SDAG-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v7, v7, v15
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v3, v3, v11
; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v0, v0, v8
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v13
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v5, v6, v14
+; GFX12-SDAG-FAKE16-NEXT: v_min_u16 v2, v2, v10
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v2, v2, v10, v6
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v5, v3
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v12
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v1, v1, v3, v7
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v4, v2
+; GFX12-SDAG-FAKE16-NEXT: v_min3_u16 v0, v0, v2, v5
; GFX12-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1
; GFX12-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, v0, v1
@@ -1685,18 +1685,18 @@ define i16 @test_vector_reduce_umin_v8i16(<8 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umin_v8i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX7-SDAG-NEXT: v_min_u32_e32 v2, v2, v6
-; GFX7-SDAG-NEXT: v_min_u32_e32 v3, v3, v7
-; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-SDAG-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
@@ -1878,32 +1878,32 @@ define i16 @test_vector_reduce_umin_v16i16(<16 x i16> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_umin_v16i16:
; GFX7-SDAG: ; %bb.0: ; %entry
; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX7-SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX7-SDAG-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX7-SDAG-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX7-SDAG-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-SDAG-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX7-SDAG-NEXT: v_min_u32_e32 v7, v7, v15
-; GFX7-SDAG-NEXT: v_min_u32_e32 v6, v6, v14
-; GFX7-SDAG-NEXT: v_min_u32_e32 v4, v4, v12
-; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v8
-; GFX7-SDAG-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX7-SDAG-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX7-SDAG-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-SDAG-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX7-SDAG-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-SDAG-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-SDAG-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX7-SDAG-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v1, v1, v9
-; GFX7-SDAG-NEXT: v_min3_u32 v2, v2, v10, v6
-; GFX7-SDAG-NEXT: v_min3_u32 v3, v3, v11, v7
-; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v3
-; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v2
+; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX7-SDAG-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX7-SDAG-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX7-SDAG-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX7-SDAG-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v4, v12
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v5, v13
+; GFX7-SDAG-NEXT: v_min3_u32 v1, v1, v3, v7
+; GFX7-SDAG-NEXT: v_min3_u32 v0, v0, v2, v6
; GFX7-SDAG-NEXT: v_min_u32_e32 v0, v0, v1
; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
new file mode 100644
index 0000000..64d055b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
@@ -0,0 +1,531 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=amdgcn -mcpu=fiji %s -o - | FileCheck %s --check-prefixes=GFX8,DAGISEL-GFX8
+; RUN: llc -O3 -mtriple=amdgcn -mcpu=gfx942 %s -o - | FileCheck %s --check-prefixes=GFX942,DAGISEL-GFX942
+; RUN: llc -O3 -mtriple=amdgcn -mcpu=gfx1200 %s -o - | FileCheck %s --check-prefixes=GFX12,DAGISEL-GFX12
+
+; RUN: llc -O3 -global-isel -mtriple=amdgcn -mcpu=fiji %s -o - | FileCheck %s --check-prefixes=GFX8,GISEL-GFX8
+; RUN: llc -O3 -global-isel -mtriple=amdgcn -mcpu=gfx942 %s -o - | FileCheck %s --check-prefixes=GFX942,GISEL-GFX942
+; RUN: llc -O3 -global-isel -mtriple=amdgcn -mcpu=gfx1200 %s -o - | FileCheck %s --check-prefixes=GFX12,GISEL-GFX12
+
+; (workitem_id_x | workitem_id_y | workitem_id_z) == 0
+define i1 @workitem_zero() {
+; DAGISEL-GFX8-LABEL: workitem_zero:
+; DAGISEL-GFX8: ; %bb.0: ; %entry
+; DAGISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX8-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; DAGISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; DAGISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX942-LABEL: workitem_zero:
+; DAGISEL-GFX942: ; %bb.0: ; %entry
+; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX942-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX942-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; DAGISEL-GFX942-NEXT: s_nop 1
+; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX12-LABEL: workitem_zero:
+; DAGISEL-GFX12: ; %bb.0: ; %entry
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DAGISEL-GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX8-LABEL: workitem_zero:
+; GISEL-GFX8: ; %bb.0: ; %entry
+; GISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX8-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX8-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX8-NEXT: v_bfe_u32 v1, v31, 20, 10
+; GISEL-GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX942-LABEL: workitem_zero:
+; GISEL-GFX942: ; %bb.0: ; %entry
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX942-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX942-NEXT: v_bfe_u32 v2, v31, 20, 10
+; GISEL-GFX942-NEXT: v_or3_b32 v0, v0, v1, v2
+; GISEL-GFX942-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GISEL-GFX942-NEXT: s_nop 1
+; GISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: workitem_zero:
+; GISEL-GFX12: ; %bb.0: ; %entry
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX12-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX12-NEXT: v_bfe_u32 v2, v31, 20, 10
+; GISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX12-NEXT: v_or3_b32 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; GISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workitem.id.z()
+ %or1 = or i32 %or, %2
+ %cmp = icmp eq i32 %or1, 0
+ ret i1 %cmp
+}
+
+; (workitem_id_x | workitem_id_y | workitem_id_z) != 0
+define i1 @workitem_nonzero() {
+; DAGISEL-GFX8-LABEL: workitem_nonzero:
+; DAGISEL-GFX8: ; %bb.0: ; %entry
+; DAGISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX8-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; DAGISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; DAGISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX942-LABEL: workitem_nonzero:
+; DAGISEL-GFX942: ; %bb.0: ; %entry
+; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX942-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX942-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; DAGISEL-GFX942-NEXT: s_nop 1
+; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX12-LABEL: workitem_nonzero:
+; DAGISEL-GFX12: ; %bb.0: ; %entry
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: v_and_b32_e32 v0, 0x3fffffff, v31
+; DAGISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DAGISEL-GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX8-LABEL: workitem_nonzero:
+; GISEL-GFX8: ; %bb.0: ; %entry
+; GISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX8-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX8-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX8-NEXT: v_bfe_u32 v1, v31, 20, 10
+; GISEL-GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GISEL-GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX942-LABEL: workitem_nonzero:
+; GISEL-GFX942: ; %bb.0: ; %entry
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX942-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX942-NEXT: v_bfe_u32 v2, v31, 20, 10
+; GISEL-GFX942-NEXT: v_or3_b32 v0, v0, v1, v2
+; GISEL-GFX942-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-GFX942-NEXT: s_nop 1
+; GISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: workitem_nonzero:
+; GISEL-GFX12: ; %bb.0: ; %entry
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GISEL-GFX12-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX12-NEXT: v_bfe_u32 v2, v31, 20, 10
+; GISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX12-NEXT: v_or3_b32 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; GISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workitem.id.z()
+ %or1 = or i32 %or, %2
+ %cmp = icmp ne i32 %or1, 0
+ ret i1 %cmp
+}
+
+; (workgroup_id_x | workgroup_id_y | workgroup_id_z) == 0
+define i1 @workgroup_zero() {
+; DAGISEL-GFX8-LABEL: workgroup_zero:
+; DAGISEL-GFX8: ; %bb.0: ; %entry
+; DAGISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
+; DAGISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
+; DAGISEL-GFX8-NEXT: s_cmp_eq_u32 s4, 0
+; DAGISEL-GFX8-NEXT: s_cselect_b64 s[4:5], -1, 0
+; DAGISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; DAGISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX942-LABEL: workgroup_zero:
+; DAGISEL-GFX942: ; %bb.0: ; %entry
+; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; DAGISEL-GFX942-NEXT: s_cmp_eq_u32 s0, 0
+; DAGISEL-GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
+; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX12-LABEL: workgroup_zero:
+; DAGISEL-GFX12: ; %bb.0: ; %entry
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; DAGISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_cmp_eq_u32 s0, 0
+; DAGISEL-GFX12-NEXT: s_cselect_b32 s0, -1, 0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX8-LABEL: workgroup_zero:
+; GISEL-GFX8: ; %bb.0: ; %entry
+; GISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
+; GISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
+; GISEL-GFX8-NEXT: s_cmp_eq_u32 s4, 0
+; GISEL-GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GISEL-GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX942-LABEL: workgroup_zero:
+; GISEL-GFX942: ; %bb.0: ; %entry
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; GISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; GISEL-GFX942-NEXT: s_cmp_eq_u32 s0, 0
+; GISEL-GFX942-NEXT: s_cselect_b32 s0, 1, 0
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: workgroup_zero:
+; GISEL-GFX12: ; %bb.0: ; %entry
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; GISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_cmp_eq_u32 s0, 0
+; GISEL-GFX12-NEXT: s_cselect_b32 s0, 1, 0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workgroup.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workgroup.id.z()
+ %or1 = or i32 %or, %2
+ %cmp = icmp eq i32 %or1, 0
+ ret i1 %cmp
+}
+
+; (workgroup_id_x | workgroup_id_y | workgroup_id_z) != 0
+define i1 @workgroup_nonzero() {
+; DAGISEL-GFX8-LABEL: workgroup_nonzero:
+; DAGISEL-GFX8: ; %bb.0: ; %entry
+; DAGISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
+; DAGISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
+; DAGISEL-GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; DAGISEL-GFX8-NEXT: s_cselect_b64 s[4:5], -1, 0
+; DAGISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; DAGISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX942-LABEL: workgroup_nonzero:
+; DAGISEL-GFX942: ; %bb.0: ; %entry
+; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; DAGISEL-GFX942-NEXT: s_cmp_lg_u32 s0, 0
+; DAGISEL-GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
+; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX12-LABEL: workgroup_nonzero:
+; DAGISEL-GFX12: ; %bb.0: ; %entry
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; DAGISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_cmp_lg_u32 s0, 0
+; DAGISEL-GFX12-NEXT: s_cselect_b32 s0, -1, 0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX8-LABEL: workgroup_nonzero:
+; GISEL-GFX8: ; %bb.0: ; %entry
+; GISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
+; GISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
+; GISEL-GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GISEL-GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GISEL-GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX942-LABEL: workgroup_nonzero:
+; GISEL-GFX942: ; %bb.0: ; %entry
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; GISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; GISEL-GFX942-NEXT: s_cmp_lg_u32 s0, 0
+; GISEL-GFX942-NEXT: s_cselect_b32 s0, 1, 0
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: workgroup_nonzero:
+; GISEL-GFX12: ; %bb.0: ; %entry
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; GISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_cmp_lg_u32 s0, 0
+; GISEL-GFX12-NEXT: s_cselect_b32 s0, 1, 0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workgroup.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workgroup.id.z()
+ %or1 = or i32 %or, %2
+ %cmp = icmp ne i32 %or1, 0
+ ret i1 %cmp
+}
+
+; (workitem_id_x | workitem_id_y | workitem_id_z | workgroup_id_x | workgroup_id_y | workgroup_id_z) == 0
+define i1 @workitem_workgroup_zero() {
+; GFX8-LABEL: workitem_workgroup_zero:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_or_b32 s4, s12, s13
+; GFX8-NEXT: s_or_b32 s4, s4, s14
+; GFX8-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: workitem_workgroup_zero:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_or_b32 s0, s12, s13
+; GFX942-NEXT: s_or_b32 s0, s0, s14
+; GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX942-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GFX942-NEXT: v_or3_b32 v0, s0, v0, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: workitem_workgroup_zero:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX12-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_or_b32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_or3_b32 v0, s0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workgroup.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workgroup.id.z()
+ %or1 = or i32 %or, %2
+ %3 = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %or2 = or i32 %or1, %3
+ %4 = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %or3 = or i32 %or2, %4
+ %5 = tail call i32 @llvm.amdgcn.workitem.id.z()
+ %or4 = or i32 %or3, %5
+ %cmp = icmp eq i32 %or3, 0
+ ret i1 %cmp
+}
+
+; (workitem_id_x | workitem_id_y | workitem_id_z | workgroup_id_x | workgroup_id_y | workgroup_id_z) != 0
+define i1 @workitem_workgroup_nonzero() {
+; GFX8-LABEL: workitem_workgroup_nonzero:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_or_b32 s4, s12, s13
+; GFX8-NEXT: s_or_b32 s4, s4, s14
+; GFX8-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v1, v31, 20, 10
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX942-LABEL: workitem_workgroup_nonzero:
+; DAGISEL-GFX942: ; %bb.0: ; %entry
+; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; DAGISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; DAGISEL-GFX942-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DAGISEL-GFX942-NEXT: v_or_b32_e32 v0, s0, v0
+; DAGISEL-GFX942-NEXT: v_bfe_u32 v1, v31, 20, 10
+; DAGISEL-GFX942-NEXT: v_bfe_u32 v2, v31, 10, 10
+; DAGISEL-GFX942-NEXT: v_or3_b32 v0, v0, v2, v1
+; DAGISEL-GFX942-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; DAGISEL-GFX942-NEXT: s_nop 1
+; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; DAGISEL-GFX12-LABEL: workitem_workgroup_nonzero:
+; DAGISEL-GFX12: ; %bb.0: ; %entry
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DAGISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; DAGISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; DAGISEL-GFX12-NEXT: v_bfe_u32 v1, v31, 20, 10
+; DAGISEL-GFX12-NEXT: v_bfe_u32 v2, v31, 10, 10
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: v_or3_b32 v0, s0, s1, v0
+; DAGISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; DAGISEL-GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; DAGISEL-GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX942-LABEL: workitem_workgroup_nonzero:
+; GISEL-GFX942: ; %bb.0: ; %entry
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
+; GISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x3ff
+; GISEL-GFX942-NEXT: v_and_or_b32 v0, v31, v0, s0
+; GISEL-GFX942-NEXT: v_bfe_u32 v1, v31, 10, 10
+; GISEL-GFX942-NEXT: v_bfe_u32 v2, v31, 20, 10
+; GISEL-GFX942-NEXT: v_or3_b32 v0, v0, v1, v2
+; GISEL-GFX942-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-GFX942-NEXT: s_nop 1
+; GISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: workitem_workgroup_nonzero:
+; GISEL-GFX12: ; %bb.0: ; %entry
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: s_and_b32 s0, ttmp7, 0xffff
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_lshr_b32 s1, ttmp7, 16
+; GISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
+; GISEL-GFX12-NEXT: v_bfe_u32 v0, v31, 10, 10
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
+; GISEL-GFX12-NEXT: v_bfe_u32 v1, v31, 20, 10
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: v_and_or_b32 v2, 0x3ff, v31, s0
+; GISEL-GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-GFX12-NEXT: v_or3_b32 v0, v2, v0, v1
+; GISEL-GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffd
+; GISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %1 = tail call i32 @llvm.amdgcn.workgroup.id.y()
+ %or = or i32 %0, %1
+ %2 = tail call i32 @llvm.amdgcn.workgroup.id.z()
+ %or1 = or i32 %or, %2
+ %3 = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %or2 = or i32 %or1, %3
+ %4 = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %or3 = or i32 %or2, %4
+ %5 = tail call i32 @llvm.amdgcn.workitem.id.z()
+ %or4 = or i32 %or3, %5
+ %cmp = icmp ne i32 %or4, 0
+ ret i1 %cmp
+}
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/llvm/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index dc1d4b2..25119fe 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -561,7 +561,7 @@ define void @test_load_store_struct(ptr %addr) {
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-DAG: [[VAL1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load (s32) from %ir.addr)
; CHECK-DAG: [[OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-; CHECK-DAG: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFFSET]](s32)
+; CHECK-DAG: [[ADDR2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR1]], [[OFFSET]](s32)
; CHECK-DAG: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[ADDR2]](p0) :: (load (s32) from %ir.addr + 4)
; CHECK-DAG: G_STORE [[VAL1]](s32), [[ADDR1]](p0) :: (store (s32) into %ir.addr)
; CHECK-DAG: [[ADDR3:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
index 044ad60..3c900c2 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
@@ -128,7 +128,7 @@ body: |
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-NEXT: [[V1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load (s32), align 1)
; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
+ ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from unknown-address + 4, align 1)
; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store (s32), align 1)
@@ -165,7 +165,7 @@ body: |
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-NEXT: [[V1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load (s32), align 1)
; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
+ ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from unknown-address + 4, align 1)
; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store (s32), align 1)
diff --git a/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..8f7b050
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..05e1e8b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..a65e5c5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/fcopysign.ll b/llvm/test/CodeGen/ARM/fcopysign.ll
index b183418..dbebe44 100644
--- a/llvm/test/CodeGen/ARM/fcopysign.ll
+++ b/llvm/test/CodeGen/ARM/fcopysign.ll
@@ -85,6 +85,7 @@ define float @test4() nounwind {
; SOFT-NEXT: vadd.f32 d0, d0, d16
; SOFT-NEXT: vmov r0, s0
; SOFT-NEXT: pop {lr}
+; SOFT-NEXT: bx lr
;
; HARD-LABEL: test4:
; HARD: @ %bb.0: @ %entry
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
index 0f711630..4a65a53 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Descriptor Range type: Invalid
+; CHECK: error: Invalid Descriptor Range type
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
index 6551116..031dfca 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: NOTRootFlags
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
index 579528d..2739320 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: Invalid
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
entry:
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 7e7d56e..855e0c0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MaxLOD: 0
+; CHECK: error: Invalid value for MaxLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index d958f10..812749b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MinLOD: 0
+; CHECK: error: Invalid value for MinLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 34b27eb..6898aec 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MipLODBias: 666
+; CHECK: error: Invalid value for MipLODBias: 6.660000e+02
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
index b23366b..f5430df 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
@@ -1,20 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -verify-machineinstrs < %s | FileCheck %s
@.str = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, 3'RD STRING\00", align 1
@.str1 = private unnamed_addr constant [3 x i8] c"%s\00", align 1
-; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...)
; Function Attrs: nounwind
-define i32 @main() {
+define i32 @main() nounwind {
; CHECK-LABEL: main:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: .cfi_def_cfa r30, 8
-; CHECK-NEXT: .cfi_offset r31, -4
-; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK: // %bb.0: // %entry
; CHECK-NEXT: {
; CHECK-NEXT: r0 = ##.L.str1
; CHECK-NEXT: r3:2 = CONST64(#2325073635944967245)
@@ -53,5 +48,4 @@ entry:
ret i32 0
}
-; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index c1d4220..e5a8524 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -114,22 +114,15 @@ define void @insert_4xdouble(ptr %src, ptr %dst, double %in) nounwind {
define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_32xi8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvst $xr0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: xvld $xr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: xvreplgr2vr.b $xr2, $a0
+; CHECK-NEXT: xvseq.b $xr0, $xr2, $xr0
+; CHECK-NEXT: xvreplgr2vr.b $xr2, $a2
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <32 x i8>, ptr %src
%v_new = insertelement <32 x i8> %v, i8 %in, i32 %idx
@@ -140,22 +133,15 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_16xi16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvst $xr0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: xvld $xr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: xvreplgr2vr.h $xr2, $a0
+; CHECK-NEXT: xvseq.h $xr0, $xr2, $xr0
+; CHECK-NEXT: xvreplgr2vr.h $xr2, $a2
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <16 x i16>, ptr %src
%v_new = insertelement <16 x i16> %v, i16 %in, i32 %idx
@@ -166,22 +152,15 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_8xi32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvst $xr0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: xvld $xr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: xvreplgr2vr.w $xr2, $a0
+; CHECK-NEXT: xvseq.w $xr0, $xr2, $xr0
+; CHECK-NEXT: xvreplgr2vr.w $xr2, $a2
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x i32>, ptr %src
%v_new = insertelement <8 x i32> %v, i32 %in, i32 %idx
@@ -192,22 +171,15 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xi64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr0, $a0, 0
-; CHECK-NEXT: xvst $xr0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: xvld $xr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: xvreplgr2vr.d $xr2, $a0
+; CHECK-NEXT: xvseq.d $xr0, $xr2, $xr0
+; CHECK-NEXT: xvreplgr2vr.d $xr2, $a2
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x i64>, ptr %src
%v_new = insertelement <4 x i64> %v, i64 %in, i32 %idx
@@ -218,22 +190,16 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_8xfloat_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr1, $a0, 0
-; CHECK-NEXT: xvst $xr1, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI12_0)
+; CHECK-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI12_0)
+; CHECK-NEXT: xvld $xr2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: xvreplgr2vr.w $xr3, $a0
+; CHECK-NEXT: xvseq.w $xr1, $xr3, $xr1
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
+; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x float>, ptr %src
%v_new = insertelement <8 x float> %v, float %in, i32 %idx
@@ -244,22 +210,16 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin
define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xdouble_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: xvld $xr1, $a0, 0
-; CHECK-NEXT: xvst $xr1, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI13_0)
+; CHECK-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI13_0)
+; CHECK-NEXT: xvld $xr2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: xvreplgr2vr.d $xr3, $a0
+; CHECK-NEXT: xvseq.d $xr1, $xr3, $xr1
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
+; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1
; CHECK-NEXT: xvst $xr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x double>, ptr %src
%v_new = insertelement <4 x double> %v, double %in, i32 %idx
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
index c73252b..4bb1941 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
@@ -84,15 +84,15 @@ define void @insert_2xdouble(ptr %src, ptr %dst, double %ins) nounwind {
define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_16xi8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT: vld $vr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: vreplgr2vr.b $vr2, $a0
+; CHECK-NEXT: vseq.b $vr0, $vr2, $vr0
+; CHECK-NEXT: vreplgr2vr.b $vr2, $a2
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <16 x i8>, ptr %src
%v_new = insertelement <16 x i8> %v, i8 %ins, i32 %idx
@@ -103,15 +103,15 @@ define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind {
define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_8xi16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI7_0)
+; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI7_0)
+; CHECK-NEXT: vld $vr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: vreplgr2vr.h $vr2, $a0
+; CHECK-NEXT: vseq.h $vr0, $vr2, $vr0
+; CHECK-NEXT: vreplgr2vr.h $vr2, $a2
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <8 x i16>, ptr %src
%v_new = insertelement <8 x i16> %v, i16 %ins, i32 %idx
@@ -122,15 +122,15 @@ define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind {
define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xi32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: vld $vr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: vreplgr2vr.w $vr2, $a0
+; CHECK-NEXT: vseq.w $vr0, $vr2, $vr0
+; CHECK-NEXT: vreplgr2vr.w $vr2, $a2
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <4 x i32>, ptr %src
%v_new = insertelement <4 x i32> %v, i32 %ins, i32 %idx
@@ -141,15 +141,15 @@ define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind {
define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_2xi64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr0, $a0, 0
-; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: vld $vr1, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: vreplgr2vr.d $vr2, $a0
+; CHECK-NEXT: vseq.d $vr0, $vr2, $vr0
+; CHECK-NEXT: vreplgr2vr.d $vr2, $a2
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <2 x i64>, ptr %src
%v_new = insertelement <2 x i64> %v, i64 %ins, i32 %idx
@@ -160,15 +160,16 @@ define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind {
define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xfloat_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr1, $a0, 0
-; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: vld $vr2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: vreplgr2vr.w $vr3, $a0
+; CHECK-NEXT: vseq.w $vr1, $vr3, $vr1
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <4 x float>, ptr %src
%v_new = insertelement <4 x float> %v, float %ins, i32 %idx
@@ -179,15 +180,16 @@ define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwi
define void @insert_2xdouble_idx(ptr %src, ptr %dst, double %ins, i32 %idx) nounwind {
; CHECK-LABEL: insert_2xdouble_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: vld $vr1, $a0, 0
-; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: vld $vr2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: vreplgr2vr.d $vr3, $a0
+; CHECK-NEXT: vseq.d $vr1, $vr3, $vr1
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1
; CHECK-NEXT: vst $vr0, $a1, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
%v = load volatile <2 x double>, ptr %src
%v_new = insertelement <2 x double> %v, double %ins, i32 %idx
diff --git a/llvm/test/CodeGen/M68k/GlobalISel/irtranslator-call.ll b/llvm/test/CodeGen/M68k/GlobalISel/irtranslator-call.ll
index b4ecbd5..b0b0383 100644
--- a/llvm/test/CodeGen/M68k/GlobalISel/irtranslator-call.ll
+++ b/llvm/test/CodeGen/M68k/GlobalISel/irtranslator-call.ll
@@ -112,10 +112,10 @@ define void @test_arg_struct(ptr %0) nounwind {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load (s8) from %ir.0, align 2)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s32)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from %ir.0 + 2)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.0 + 4, align 2)
; CHECK-NEXT: ADJCALLSTACKDOWN 12, 0, implicit-def $sp, implicit-def $ccr, implicit $sp
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
@@ -148,25 +148,25 @@ define void @test_arg_array(ptr %0) nounwind {
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load (s8) from %ir.0)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s32)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.0 + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from %ir.0 + 2)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C2]](s32)
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from %ir.0 + 3)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C3]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C3]](s32)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD3]](p0) :: (load (s8) from %ir.0 + 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C4]](s32)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD4]](p0) :: (load (s8) from %ir.0 + 5)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C5]](s32)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD5]](p0) :: (load (s8) from %ir.0 + 6)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C6]](s32)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C6]](s32)
; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from %ir.0 + 7)
; CHECK-NEXT: ADJCALLSTACKDOWN 32, 0, implicit-def $sp, implicit-def $ccr, implicit $sp
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
diff --git a/llvm/test/CodeGen/M68k/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/M68k/GlobalISel/legalize-load-store.mir
index e9709f5..fbc91ca 100644
--- a/llvm/test/CodeGen/M68k/GlobalISel/legalize-load-store.mir
+++ b/llvm/test/CodeGen/M68k/GlobalISel/legalize-load-store.mir
@@ -79,13 +79,13 @@ body: |
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C4]](s32)
; CHECK-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 12)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C5]](s32)
; CHECK-NEXT: G_STORE [[C1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C6]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C6]](s32)
; CHECK-NEXT: G_STORE [[C2]](s32), [[PTR_ADD2]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: G_STORE [[C3]](s32), [[LOAD]](p0) :: (store (s32), align 16)
; CHECK-NEXT: RTS
diff --git a/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..3f7590a
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,91 @@
+;; Test MIR printer and parser for type id field in call site info. Test that
+;; it works well with/without --emit-call-site-info.
+
+;; Multiplex --call-graph-section and -emit-call-site-info as both utilize
+;; CallSiteInfo and callSites.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with --call-graph-section only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is not set, calleeTypeIds is set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section %s -stop-after=finalize-isel -o %t1.mir
+; RUN: cat %t1.mir | FileCheck %s --check-prefix=PRINTER_CGS
+; PRINTER_CGS: name: main
+; PRINTER_CGS: callSites:
+; PRINTER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PRINTER_CGS-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section %t1.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS
+; PARSER_CGS: name: main
+; PARSER_CGS: callSites:
+; PARSER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PARSER_CGS-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with -emit-call-site-info only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is set, calleeTypeIds is not set.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CSI
+; PRINTER_CSI: name: main
+; PRINTER_CSI: callSites:
+; PRINTER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PRINTER_CSI-NOT: calleeTypeIds:
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CSI
+; PARSER_CSI: name: main
+; PARSER_CSI: callSites:
+; PARSER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PARSER_CSI-NOT: calleeTypeIds:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with both -emit-call-site-info and --call-graph-section.
+
+;; Test printer.
+;; Verify both fwdArgRegs and calleeTypeIds are set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CGS_CSI
+; PRINTER_CGS_CSI: name: main
+; PRINTER_CGS_CSI: callSites:
+; PRINTER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PRINTER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS_CSI
+; PARSER_CGS_CSI: name: main
+; PARSER_CGS_CSI: callSites:
+; PARSER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PARSER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
index d1a0248..fd3fe17 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
@@ -12,11 +12,11 @@ define { float, float } @add_complex_float(ptr %a, ptr %b) {
; MIPS32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; MIPS32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir..realp)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: %5:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %5(p0) :: (load (s32) from %ir..imagp)
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY [[COPY1]](p0)
; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir..realp1)
- ; MIPS32-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: %9:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY1]], [[C]](s32)
; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD %9(p0) :: (load (s32) from %ir..imagp3)
; MIPS32-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[LOAD]], [[LOAD2]]
; MIPS32-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[LOAD1]], [[LOAD3]]
@@ -50,11 +50,11 @@ define { double, double } @add_complex_double(ptr %a, ptr %b) {
; MIPS32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; MIPS32-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY2]](p0) :: (load (s64) from %ir..realp)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; MIPS32-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: %5:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %5(p0) :: (load (s64) from %ir..imagp)
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY [[COPY1]](p0)
; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir..realp1)
- ; MIPS32-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: %9:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY1]], [[C]](s32)
; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD %9(p0) :: (load (s64) from %ir..imagp3)
; MIPS32-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[LOAD]], [[LOAD2]]
; MIPS32-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[LOAD1]], [[LOAD3]]
@@ -91,7 +91,7 @@ define void @call_ret_complex_float(ptr %z) {
; MIPS32-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: %5:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32-NEXT: G_STORE [[COPY1]](s32), [[COPY3]](p0) :: (store (s32) into %ir..realp)
; MIPS32-NEXT: G_STORE [[COPY2]](s32), %5(p0) :: (store (s32) into %ir..imagp)
; MIPS32-NEXT: RetRA
@@ -120,7 +120,7 @@ define void @call_ret_complex_double(ptr %z) {
; MIPS32-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; MIPS32-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: %5:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32-NEXT: G_STORE [[COPY1]](s64), [[COPY3]](p0) :: (store (s64) into %ir..realp)
; MIPS32-NEXT: G_STORE [[COPY2]](s64), %5(p0) :: (store (s64) into %ir..imagp)
; MIPS32-NEXT: RetRA
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
index 58dc2f1..39fd348 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/sret_pointer.ll
@@ -13,7 +13,7 @@ define void @ZeroInit(ptr noalias sret(%struct.S) %agg.result) {
; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; MIPS32-NEXT: G_STORE [[C]](s32), [[COPY1]](p0) :: (store (s32) into %ir.x)
; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32-NEXT: %4:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; MIPS32-NEXT: %4:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; MIPS32-NEXT: G_STORE [[C]](s32), %4(p0) :: (store (s32) into %ir.y)
; MIPS32-NEXT: RetRA
entry:
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/var_arg.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/var_arg.ll
index 214e5aa..6e215de 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/var_arg.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/var_arg.ll
@@ -31,7 +31,7 @@ define void @testVaCopyArg(ptr %fmt, ...) {
; MIPS32-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX5]](p0), [[FRAME_INDEX4]](p0)
; MIPS32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX5]](p0) :: (dereferenceable load (p0) from %ir.aq)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32-NEXT: %13:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C]](s32)
+ ; MIPS32-NEXT: %13:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C]](s32)
; MIPS32-NEXT: G_STORE %13(p0), [[FRAME_INDEX5]](p0) :: (store (p0) into %ir.aq)
; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load (p0) from %ir.argp.cur)
; MIPS32-NEXT: G_STORE [[LOAD1]](p0), [[FRAME_INDEX6]](p0) :: (store (p0) into %ir.s)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
index 3d6a243..54003f0 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
@@ -40,16 +40,17 @@ body: |
; MIPS32-LABEL: name: test_memcpy_inline
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.1, align 4)
- ; MIPS32: G_STORE [[LOAD]](s8), [[COPY]](p0) :: (store (s8) into %ir.0, align 4)
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
- ; MIPS32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.1 + 1, basealign 4)
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
- ; MIPS32: G_STORE [[LOAD1]](s8), [[PTR_ADD1]](p0) :: (store (s8) into %ir.0 + 1, basealign 4)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.1, align 4)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s8), [[COPY]](p0) :: (store (s8) into %ir.0, align 4)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.1 + 1, basealign 4)
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: G_STORE [[LOAD1]](s8), [[PTR_ADD1]](p0) :: (store (s8) into %ir.0 + 1, basealign 4)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s64) = G_CONSTANT i64 2
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
index ef607c1..3f0b20c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
@@ -21,10 +21,11 @@ body: |
; MIPS32-LABEL: name: load_i32
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
- ; MIPS32: $v0 = COPY [[LOAD]](s32)
- ; MIPS32: RetRA implicit $v0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
+ ; MIPS32-NEXT: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0
%0:_(p0) = COPY $a0
%1:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.ptr)
$v0 = COPY %1(s32)
@@ -42,14 +43,15 @@ body: |
; MIPS32-LABEL: name: load_i64
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.ptr + 4, basealign 8)
- ; MIPS32: $v0 = COPY [[LOAD]](s32)
- ; MIPS32: $v1 = COPY [[LOAD1]](s32)
- ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.ptr + 4, basealign 8)
+ ; MIPS32-NEXT: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[LOAD1]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%0:_(p0) = COPY $a0
%1:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
%2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
@@ -69,11 +71,12 @@ body: |
; MIPS32-LABEL: name: load_ambiguous_i64_in_fpr
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.i64_ptr_a)
- ; MIPS32: G_STORE [[LOAD]](s64), [[COPY1]](p0) :: (store (s64) into %ir.i64_ptr_b)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.i64_ptr_a)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s64), [[COPY1]](p0) :: (store (s64) into %ir.i64_ptr_b)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.i64_ptr_a)
@@ -92,10 +95,11 @@ body: |
; MIPS32-LABEL: name: load_float
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
- ; MIPS32: $f0 = COPY [[LOAD]](s32)
- ; MIPS32: RetRA implicit $f0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
+ ; MIPS32-NEXT: $f0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: RetRA implicit $f0
%0:_(p0) = COPY $a0
%1:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.ptr)
$f0 = COPY %1(s32)
@@ -113,11 +117,12 @@ body: |
; MIPS32-LABEL: name: load_ambiguous_float_in_gpr
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.float_ptr_a)
- ; MIPS32: G_STORE [[LOAD]](s32), [[COPY1]](p0) :: (store (s32) into %ir.float_ptr_b)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.float_ptr_a)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s32), [[COPY1]](p0) :: (store (s32) into %ir.float_ptr_b)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.float_ptr_a)
@@ -136,10 +141,11 @@ body: |
; MIPS32-LABEL: name: load_double
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
- ; MIPS32: $d0 = COPY [[LOAD]](s64)
- ; MIPS32: RetRA implicit $d0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
+ ; MIPS32-NEXT: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32-NEXT: RetRA implicit $d0
%0:_(p0) = COPY $a0
%1:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
$d0 = COPY %1(s64)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
index 4226f2b..319bb2b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
@@ -251,93 +251,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -443,127 +467,151 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_i64_in_gpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
- ; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
- ; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
- ; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
- ; MIPS32: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
- ; MIPS32: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
- ; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
- ; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
- ; MIPS32: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
- ; MIPS32: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
- ; MIPS32: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
- ; MIPS32: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
- ; MIPS32: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
- ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
- ; MIPS32: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
- ; MIPS32: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
- ; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
- ; MIPS32: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
- ; MIPS32: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY3]], [[C4]](s32)
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD1]], [[C6]](s32)
+ ; MIPS32-NEXT: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C8]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY3]], [[C10]](s32)
+ ; MIPS32-NEXT: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C11]](s32)
+ ; MIPS32-NEXT: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+ ; MIPS32-NEXT: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C13]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+ ; MIPS32-NEXT: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+ ; MIPS32-NEXT: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C15]](s32)
+ ; MIPS32-NEXT: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C16]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -671,93 +719,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -863,94 +935,118 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
index 4226f2b..319bb2b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
@@ -251,93 +251,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -443,127 +467,151 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_i64_in_gpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
- ; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
- ; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
- ; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
- ; MIPS32: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
- ; MIPS32: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
- ; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
- ; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
- ; MIPS32: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
- ; MIPS32: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
- ; MIPS32: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
- ; MIPS32: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
- ; MIPS32: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
- ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
- ; MIPS32: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
- ; MIPS32: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
- ; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
- ; MIPS32: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
- ; MIPS32: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY3]], [[C4]](s32)
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD1]], [[C6]](s32)
+ ; MIPS32-NEXT: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C8]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY3]], [[C10]](s32)
+ ; MIPS32-NEXT: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C11]](s32)
+ ; MIPS32-NEXT: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+ ; MIPS32-NEXT: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C13]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+ ; MIPS32-NEXT: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+ ; MIPS32-NEXT: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C15]](s32)
+ ; MIPS32-NEXT: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[LOAD2]], [[C16]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -671,93 +719,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -863,94 +935,118 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
index 80bf04a..874056e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
@@ -19,10 +19,11 @@ body: |
; MIPS32-LABEL: name: store_i32
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $a0
%1:_(p0) = COPY $a1
G_STORE %0(s32), %1(p0) :: (store (s32) into %ir.ptr)
@@ -40,14 +41,15 @@ body: |
; MIPS32-LABEL: name: store_i64
; MIPS32: liveins: $a0, $a1, $a2
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32) into %ir.ptr, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
- ; MIPS32: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.ptr + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32) into %ir.ptr, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s32)
+ ; MIPS32-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.ptr + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
%0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
@@ -67,10 +69,11 @@ body: |
; MIPS32-LABEL: name: store_float
; MIPS32: liveins: $a1, $f12
- ; MIPS32: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $f12
%1:_(p0) = COPY $a1
G_STORE %0(s32), %1(p0) :: (store (s32) into %ir.ptr)
@@ -88,10 +91,11 @@ body: |
; MIPS32-LABEL: name: store_double
; MIPS32: liveins: $a2, $d6
- ; MIPS32: [[COPY:%[0-9]+]]:fprb(s64) = COPY $d6
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a2
- ; MIPS32: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:fprb(s64) = COPY $d6
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a2
+ ; MIPS32-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s64) = COPY $d6
%1:_(p0) = COPY $a2
G_STORE %0(s64), %1(p0) :: (store (s64) into %ir.ptr)
diff --git a/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..a66a884
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`.
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..e7f162c
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..9f5e858
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/NVPTX/bug26185-2.ll b/llvm/test/CodeGen/NVPTX/bug26185-2.ll
index 4e11f58..46172b1 100644
--- a/llvm/test/CodeGen/NVPTX/bug26185-2.ll
+++ b/llvm/test/CodeGen/NVPTX/bug26185-2.ll
@@ -16,7 +16,7 @@ define ptx_kernel void @spam(ptr addrspace(1) noalias nocapture readonly %arg, p
; CHECK: .maxntid 1, 1, 1
; CHECK-NEXT: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .b64 %rd<9>;
+; CHECK-NEXT: .reg .b64 %rd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %bb
; CHECK-NEXT: ld.param.b64 %rd1, [spam_param_0];
@@ -25,10 +25,9 @@ define ptx_kernel void @spam(ptr addrspace(1) noalias nocapture readonly %arg, p
; CHECK-NEXT: add.s64 %rd4, %rd1, %rd3;
; CHECK-NEXT: ld.param.b64 %rd5, [spam_param_1];
; CHECK-NEXT: ld.global.nc.s16 %r1, [%rd4+16];
-; CHECK-NEXT: mul.wide.s32 %rd6, %r1, %r1;
-; CHECK-NEXT: ld.global.b64 %rd7, [%rd5];
-; CHECK-NEXT: add.s64 %rd8, %rd6, %rd7;
-; CHECK-NEXT: st.global.b64 [%rd5], %rd8;
+; CHECK-NEXT: ld.global.b64 %rd6, [%rd5];
+; CHECK-NEXT: mad.wide.s32 %rd7, %r1, %r1, %rd6;
+; CHECK-NEXT: st.global.b64 [%rd5], %rd7;
; CHECK-NEXT: ret;
bb:
%tmp5 = add nsw i64 %arg3, 8
diff --git a/llvm/test/CodeGen/NVPTX/combine-wide.ll b/llvm/test/CodeGen/NVPTX/combine-wide.ll
new file mode 100644
index 0000000..ed4a2b6
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/combine-wide.ll
@@ -0,0 +1,1339 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -O1 | FileCheck %s --check-prefixes=CHECK,O1
+; RUN: llc < %s -O0 | FileCheck %s --check-prefixes=CHECK,O0
+
+target triple = "nvptx64-nvidia-cuda"
+
+define i64 @t1(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t1(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t1_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t1_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t1_param_2];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t1(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t1_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t1_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t1_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 %c, %sext
+ ret i64 %add
+}
+
+define i64 @t2(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t2(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t2_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t2_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t2_param_2];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t2(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t2_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t2_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t2_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd2, %rd1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 %sext, %c
+ ret i64 %add
+}
+
+define i64 @t3(i32 %a, i32 %b) {
+;
+; O1-LABEL: t3(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t3_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t3_param_1];
+; O1-NEXT: mad.wide.s32 %rd1, %r1, %r2, 1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t3(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t3_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t3_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: add.s64 %rd2, %rd1, 1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd2;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ %add = add i64 1, %sext
+ ret i64 %add
+}
+
+define i64 @t4(i32 %a, i64 %c) {
+;
+; O1-LABEL: t4(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t4_param_0];
+; O1-NEXT: ld.param.b64 %rd1, [t4_param_1];
+; O1-NEXT: mad.wide.s32 %rd2, %r1, 3, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t4(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t4_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t4_param_0];
+; O0-NEXT: mul.lo.s32 %r2, %r1, 3;
+; O0-NEXT: cvt.s64.s32 %rd2, %r2;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, 3
+ %sext = sext i32 %mul to i64
+ %add = add i64 %c, %sext
+ ret i64 %add
+}
+
+define i64 @t4_1(i32 %a, i64 %c) {
+;
+; O1-LABEL: t4_1(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t4_1_param_0];
+; O1-NEXT: mad.wide.s32 %rd1, %r1, 3, 5;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t4_1(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t4_1_param_0];
+; O0-NEXT: mul.lo.s32 %r2, %r1, 3;
+; O0-NEXT: cvt.s64.s32 %rd1, %r2;
+; O0-NEXT: add.s64 %rd2, %rd1, 5;
+; O0-NEXT: st.param.b64 [func_retval0], %rd2;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, 3
+ %sext = sext i32 %mul to i64
+ %add = add i64 5, %sext
+ ret i64 %add
+}
+
+define i64 @t5(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t5(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t5_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t5_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t5_param_2];
+; O1-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t5(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t5_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t5_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t5_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd1, %rd2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ %add = add i64 %c, %zext
+ ret i64 %add
+}
+
+define i64 @t6(i32 %a, i32 %b, i64 %c) {
+;
+; O1-LABEL: t6(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t6_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t6_param_1];
+; O1-NEXT: ld.param.b64 %rd1, [t6_param_2];
+; O1-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1;
+; O1-NEXT: st.param.b64 [func_retval0], %rd2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t6(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t6_param_2];
+; O0-NEXT: ld.param.b32 %r2, [t6_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t6_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd2, %r3;
+; O0-NEXT: add.s64 %rd3, %rd2, %rd1;
+; O0-NEXT: st.param.b64 [func_retval0], %rd3;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ %add = add i64 %zext, %c
+ ret i64 %add
+}
+
+define i32 @t7(i16 %a, i16 %b) {
+;
+; O1-LABEL: t7(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t7_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t7_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t7(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t7_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t7_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t8(i16 %a, i16 %b) {
+;
+; O1-LABEL: t8(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t8_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t8_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s32.s16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t8(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t8_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t8_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t9(i32 %a, i32 %b) {
+;
+; O1-LABEL: t9(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t9_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t9_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.u64.u32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t9(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t9_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t9_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t10(i32 %a, i32 %b) {
+;
+; O1-LABEL: t10(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t10_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t10_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t10(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t10_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t10_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t11(i16 %a, i16 %b) {
+;
+; O1-LABEL: t11(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t11_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t11_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t11(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t11_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t11_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t12(i16 %a, i16 %b) {
+;
+; O1-LABEL: t12(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t12_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t12_param_1];
+; O1-NEXT: mul.wide.s16 %r1, %rs1, %rs2;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t12(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t12_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t12_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t13(i32 %a, i32 %b) {
+;
+; O1-LABEL: t13(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t13_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t13_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.u64.u32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t13(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t13_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t13_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t14(i32 %a, i32 %b) {
+;
+; O1-LABEL: t14(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t14_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t14_param_1];
+; O1-NEXT: mul.wide.s32 %rd1, %r1, %r2;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t14(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t14_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t14_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t15(i16 %a, i16 %b) {
+;
+; O1-LABEL: t15(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t15_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t15_param_1];
+; O1-NEXT: mul.wide.u16 %r1, %rs1, %rs2;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t15(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t15_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t15_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t16(i16 %a, i16 %b) {
+;
+; O1-LABEL: t16(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t16_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t16_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s32.s16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t16(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t16_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t16_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t17(i32 %a, i32 %b) {
+;
+; O1-LABEL: t17(
+; O1: {
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t17_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t17_param_1];
+; O1-NEXT: mul.wide.u32 %rd1, %r1, %r2;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t17(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t17_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t17_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.u64.u32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t18(i32 %a, i32 %b) {
+;
+; O1-LABEL: t18(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t18_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t18_param_1];
+; O1-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t18(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t18_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t18_param_0];
+; O0-NEXT: mul.lo.s32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t19(i16 %a, i16 %b) {
+;
+; O1-LABEL: t19(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t19_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t19_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t19(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t19_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t19_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t20(i16 %a) {
+;
+; CHECK-LABEL: t20(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t20_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.s32.s16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t21(i32 %a) {
+;
+; CHECK-LABEL: t21(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t21_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.u64.u32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t22(i32 %a) {
+;
+; CHECK-LABEL: t22(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t22_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.s64.s32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t23(i16 %a, i16 %b) {
+;
+; CHECK-LABEL: t23(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t23_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.u32.u16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl nsw i16 %a, 4
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t24(i16 %a, i16 %b) {
+;
+; O1-LABEL: t24(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t24_param_0];
+; O1-NEXT: mul.wide.s16 %r1, %rs1, 16;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t24(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t24_param_0];
+; O0-NEXT: shl.b16 %rs2, %rs1, 4;
+; O0-NEXT: cvt.s32.s16 %r1, %rs2;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = shl nsw i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t25(i32 %a) {
+;
+; CHECK-LABEL: t25(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t25_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.u64.u32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl nsw i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t26(i32 %a) {
+;
+; O1-LABEL: t26(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t26_param_0];
+; O1-NEXT: mul.wide.s32 %rd1, %r1, 16;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t26(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t26_param_0];
+; O0-NEXT: shl.b32 %r2, %r1, 4;
+; O0-NEXT: cvt.s64.s32 %rd1, %r2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nsw i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t27(i16 %a, i16 %b) {
+;
+; O1-LABEL: t27(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t27_param_0];
+; O1-NEXT: mul.wide.u16 %r1, %rs1, 16;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t27(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t27_param_0];
+; O0-NEXT: shl.b16 %rs2, %rs1, 4;
+; O0-NEXT: cvt.u32.u16 %r1, %rs2;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = shl nuw i16 %a, 4
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t28(i16 %a, i16 %b) {
+;
+; CHECK-LABEL: t28(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b16 %rs1, [t28_param_0];
+; CHECK-NEXT: shl.b16 %rs2, %rs1, 4;
+; CHECK-NEXT: cvt.s32.s16 %r1, %rs2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %mul = shl nuw i16 %a, 4
+ %sext = sext i16 %mul to i32
+ ret i32 %sext
+}
+
+define i64 @t29(i32 %a) {
+;
+; O1-LABEL: t29(
+; O1: {
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t29_param_0];
+; O1-NEXT: mul.wide.u32 %rd1, %r1, 16;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t29(
+; O0: {
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t29_param_0];
+; O0-NEXT: shl.b32 %r2, %r1, 4;
+; O0-NEXT: cvt.u64.u32 %rd1, %r2;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nuw i32 %a, 4
+ %zext = zext i32 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t30(i32 %a) {
+;
+; CHECK-LABEL: t30(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [t30_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 4;
+; CHECK-NEXT: cvt.s64.s32 %rd1, %r2;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %mul = shl nuw i32 %a, 4
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i64 @t31(i32 %a, i32 %b) {
+;
+; O1-LABEL: t31(
+; O1: {
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b32 %r1, [t31_param_0];
+; O1-NEXT: ld.param.b32 %r2, [t31_param_1];
+; O1-NEXT: shl.b32 %r3, %r1, %r2;
+; O1-NEXT: cvt.s64.s32 %rd1, %r3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t31(
+; O0: {
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r2, [t31_param_1];
+; O0-NEXT: ld.param.b32 %r1, [t31_param_0];
+; O0-NEXT: shl.b32 %r3, %r1, %r2;
+; O0-NEXT: cvt.s64.s32 %rd1, %r3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = shl nuw i32 %a, %b
+ %sext = sext i32 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t32(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t32(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t32_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t32_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t32_param_2];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t32(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t32_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t32_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t32_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t33(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t33(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t33_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t33_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t33_param_2];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t33(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t33_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t33_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t33_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t34(i16 %a, i16 %b) {
+;
+; O1-LABEL: t34(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t34_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t34_param_1];
+; O1-NEXT: mad.wide.s16 %r1, %rs1, %rs2, 1;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t34(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t34_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t34_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s32.s16 %r1, %rs3;
+; O0-NEXT: add.s32 %r2, %r1, 1;
+; O0-NEXT: st.param.b32 [func_retval0], %r2;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i32
+ %add = add i32 1, %sext
+ ret i32 %add
+}
+
+define i32 @t35(i16 %a, i32 %c) {
+;
+; O1-LABEL: t35(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t35_param_0];
+; O1-NEXT: ld.param.b32 %r1, [t35_param_1];
+; O1-NEXT: mad.wide.s16 %r2, %rs1, 3, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t35(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t35_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t35_param_0];
+; O0-NEXT: mul.lo.s16 %rs2, %rs1, 3;
+; O0-NEXT: cvt.s32.s16 %r2, %rs2;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, 3
+ %sext = sext i16 %mul to i32
+ %add = add i32 %c, %sext
+ ret i32 %add
+}
+
+define i32 @t36(i16 %a, i32 %c) {
+;
+; O1-LABEL: t36(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<2>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t36_param_0];
+; O1-NEXT: mad.wide.s16 %r1, %rs1, 3, 5;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t36(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<3>;
+; O0-NEXT: .reg .b32 %r<3>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs1, [t36_param_0];
+; O0-NEXT: mul.lo.s16 %rs2, %rs1, 3;
+; O0-NEXT: cvt.s32.s16 %r1, %rs2;
+; O0-NEXT: add.s32 %r2, %r1, 5;
+; O0-NEXT: st.param.b32 [func_retval0], %r2;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, 3
+ %sext = sext i16 %mul to i32
+ %add = add i32 5, %sext
+ ret i32 %add
+}
+
+define i32 @t37(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t37(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t37_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t37_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t37_param_2];
+; O1-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t37(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t37_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t37_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t37_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r1, %r2;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ %add = add i32 %c, %zext
+ ret i32 %add
+}
+
+define i32 @t38(i16 %a, i16 %b, i32 %c) {
+;
+; O1-LABEL: t38(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<3>;
+; O1-NEXT: .reg .b32 %r<3>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t38_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t38_param_1];
+; O1-NEXT: ld.param.b32 %r1, [t38_param_2];
+; O1-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r2;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t38(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b32 %r1, [t38_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t38_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t38_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r2, %r1;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i32
+ %add = add i32 %zext, %c
+ ret i32 %add
+}
+
+define i64 @t39(i16 %a, i16 %b) {
+; O1-LABEL: t39(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t39_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t39_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t39(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t39_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t39_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul i16 %a, %b
+ %zext = zext i16 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t40(i16 %a, i16 %b) {
+; O1-LABEL: t40(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t40_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t40_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t40(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t40_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t40_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.u64.u16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ %zext = zext i16 %mul to i64
+ ret i64 %zext
+}
+
+define i64 @t41(i16 %a, i16 %b) {
+; O1-LABEL: t41(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t41_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t41_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: cvt.s64.s16 %rd1, %rs3;
+; O1-NEXT: st.param.b64 [func_retval0], %rd1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t41(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b16 %rs2, [t41_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t41_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: cvt.s64.s16 %rd1, %rs3;
+; O0-NEXT: st.param.b64 [func_retval0], %rd1;
+; O0-NEXT: ret;
+ %mul = mul nsw i16 %a, %b
+ %sext = sext i16 %mul to i64
+ ret i64 %sext
+}
+
+define i32 @t42(i16 %a, i16 %b, ptr %ptr) {
+; O1-LABEL: t42(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<2>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t42_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t42_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: ld.param.b64 %rd1, [t42_param_2];
+; O1-NEXT: st.b16 [%rd1], %rs3;
+; O1-NEXT: cvt.u32.u16 %r1, %rs3;
+; O1-NEXT: st.param.b32 [func_retval0], %r1;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t42(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<2>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t42_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t42_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t42_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: st.b16 [%rd1], %rs3;
+; O0-NEXT: cvt.u32.u16 %r1, %rs3;
+; O0-NEXT: st.param.b32 [func_retval0], %r1;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ store i16 %mul, ptr %ptr
+ %zext = zext i16 %mul to i32
+ ret i32 %zext
+}
+
+define i32 @t43(i16 %a, i16 %b, i32 %c, ptr %ptr) {
+; O1-LABEL: t43(
+; O1: {
+; O1-NEXT: .reg .b16 %rs<4>;
+; O1-NEXT: .reg .b32 %r<4>;
+; O1-NEXT: .reg .b64 %rd<2>;
+; O1-EMPTY:
+; O1-NEXT: // %bb.0:
+; O1-NEXT: ld.param.b16 %rs1, [t43_param_0];
+; O1-NEXT: ld.param.b16 %rs2, [t43_param_1];
+; O1-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O1-NEXT: ld.param.b64 %rd1, [t43_param_3];
+; O1-NEXT: st.b16 [%rd1], %rs3;
+; O1-NEXT: ld.param.b32 %r1, [t43_param_2];
+; O1-NEXT: cvt.u32.u16 %r2, %rs3;
+; O1-NEXT: add.s32 %r3, %r2, %r1;
+; O1-NEXT: st.param.b32 [func_retval0], %r3;
+; O1-NEXT: ret;
+;
+; O0-LABEL: t43(
+; O0: {
+; O0-NEXT: .reg .b16 %rs<4>;
+; O0-NEXT: .reg .b32 %r<4>;
+; O0-NEXT: .reg .b64 %rd<2>;
+; O0-EMPTY:
+; O0-NEXT: // %bb.0:
+; O0-NEXT: ld.param.b64 %rd1, [t43_param_3];
+; O0-NEXT: ld.param.b32 %r1, [t43_param_2];
+; O0-NEXT: ld.param.b16 %rs2, [t43_param_1];
+; O0-NEXT: ld.param.b16 %rs1, [t43_param_0];
+; O0-NEXT: mul.lo.s16 %rs3, %rs1, %rs2;
+; O0-NEXT: st.b16 [%rd1], %rs3;
+; O0-NEXT: cvt.u32.u16 %r2, %rs3;
+; O0-NEXT: add.s32 %r3, %r2, %r1;
+; O0-NEXT: st.param.b32 [func_retval0], %r3;
+; O0-NEXT: ret;
+ %mul = mul nuw i16 %a, %b
+ store i16 %mul, ptr %ptr
+ %zext = zext i16 %mul to i32
+ %add = add i32 %zext, %c
+ ret i32 %add
+}
diff --git a/llvm/test/CodeGen/NVPTX/fold-movs.ll b/llvm/test/CodeGen/NVPTX/fold-movs.ll
new file mode 100644
index 0000000..6ee0fb2
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fold-movs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK-F32X2
+; RUN: %if ptxas-12.7 %{ \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+target triple = "nvptx64-nvidia-cuda"
+
+; Since fdiv doesn't support f32x2, this will create BUILD_VECTORs that will be
+; folded into the store, turning it into st.global.v8.b32.
+define void @writevec(<8 x float> %v1, <8 x float> %v2, ptr addrspace(1) %p) {
+; CHECK-F32X2-LABEL: writevec(
+; CHECK-F32X2: {
+; CHECK-F32X2-NEXT: .reg .b32 %r<25>;
+; CHECK-F32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT: // %bb.0:
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [writevec_param_0];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [writevec_param_0+16];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [writevec_param_1+16];
+; CHECK-F32X2-NEXT: div.rn.f32 %r13, %r8, %r12;
+; CHECK-F32X2-NEXT: div.rn.f32 %r14, %r7, %r11;
+; CHECK-F32X2-NEXT: div.rn.f32 %r15, %r6, %r10;
+; CHECK-F32X2-NEXT: div.rn.f32 %r16, %r5, %r9;
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r17, %r18, %r19, %r20}, [writevec_param_1];
+; CHECK-F32X2-NEXT: div.rn.f32 %r21, %r4, %r20;
+; CHECK-F32X2-NEXT: div.rn.f32 %r22, %r3, %r19;
+; CHECK-F32X2-NEXT: div.rn.f32 %r23, %r2, %r18;
+; CHECK-F32X2-NEXT: div.rn.f32 %r24, %r1, %r17;
+; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [writevec_param_2];
+; CHECK-F32X2-NEXT: st.global.v8.b32 [%rd1], {%r24, %r23, %r22, %r21, %r16, %r15, %r14, %r13};
+; CHECK-F32X2-NEXT: ret;
+ %v = fdiv <8 x float> %v1, %v2
+ store <8 x float> %v, ptr addrspace(1) %p, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 06c2cc8..26336b8 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -343,61 +343,77 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smax(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smax_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smax_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smax(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smax_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smax_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sgt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -473,61 +489,77 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smin(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smin_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smin_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smin(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smin_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smin_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sle <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
diff --git a/llvm/test/CodeGen/NVPTX/ld-param-sink.ll b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
new file mode 100644
index 0000000..03523a3
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare ptr @bar(i64)
+declare i64 @baz()
+
+define ptr @foo(i1 %cond) {
+; CHECK-LABEL: foo(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.b8 %rs1, [foo_param_0];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: call.uni (retval0), baz, ();
+; CHECK-NEXT: ld.param.b64 %rd2, [retval0];
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: @%p1 bra $L__BB0_2;
+; CHECK-NEXT: // %bb.1: // %bb
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: call.uni (retval0), bar, (param0);
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: $L__BB0_2: // %common.ret
+; CHECK-NEXT: st.param.b64 [func_retval0], 0;
+; CHECK-NEXT: ret;
+entry:
+ %call = call i64 @baz()
+ br i1 %cond, label %common.ret, label %bb
+
+bb:
+ %tmp = call ptr @bar(i64 %call)
+ br label %common.ret
+
+common.ret:
+ ret ptr null
+}
diff --git a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
index 5c30173..ae069cf 100644
--- a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
+++ b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
@@ -114,15 +114,14 @@ define void @foo3(i32 %a) {
; PTX64-NEXT: .reg .b64 %SP;
; PTX64-NEXT: .reg .b64 %SPL;
; PTX64-NEXT: .reg .b32 %r<2>;
-; PTX64-NEXT: .reg .b64 %rd<5>;
+; PTX64-NEXT: .reg .b64 %rd<4>;
; PTX64-EMPTY:
; PTX64-NEXT: // %bb.0:
; PTX64-NEXT: mov.b64 %SPL, __local_depot2;
; PTX64-NEXT: ld.param.b32 %r1, [foo3_param_0];
; PTX64-NEXT: add.u64 %rd2, %SPL, 0;
-; PTX64-NEXT: mul.wide.s32 %rd3, %r1, 4;
-; PTX64-NEXT: add.s64 %rd4, %rd2, %rd3;
-; PTX64-NEXT: st.local.b32 [%rd4], %r1;
+; PTX64-NEXT: mad.wide.s32 %rd3, %r1, 4, %rd2;
+; PTX64-NEXT: st.local.b32 [%rd3], %r1;
; PTX64-NEXT: ret;
%local = alloca [3 x i32], align 4
%1 = getelementptr inbounds i32, ptr %local, i32 %a
diff --git a/llvm/test/CodeGen/NVPTX/vector-loads.ll b/llvm/test/CodeGen/NVPTX/vector-loads.ll
index e16fc74..6f0dff7 100644
--- a/llvm/test/CodeGen/NVPTX/vector-loads.ll
+++ b/llvm/test/CodeGen/NVPTX/vector-loads.ll
@@ -154,7 +154,7 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<4>;
; CHECK-NEXT: .reg .b32 %r<8>;
-; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [foo_complex_param_0];
@@ -166,12 +166,11 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
; CHECK-NEXT: shl.b32 %r6, %r1, 1;
; CHECK-NEXT: or.b32 %r7, %r5, %r6;
; CHECK-NEXT: cvt.u64.u32 %rd2, %r7;
-; CHECK-NEXT: mul.wide.u32 %rd3, %r3, 131072;
-; CHECK-NEXT: add.s64 %rd4, %rd1, %rd3;
-; CHECK-NEXT: add.s64 %rd5, %rd4, %rd2;
-; CHECK-NEXT: ld.v2.b8 {%rs1, %rs2}, [%rd5+128];
+; CHECK-NEXT: mad.wide.u32 %rd3, %r3, 131072, %rd1;
+; CHECK-NEXT: add.s64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: ld.v2.b8 {%rs1, %rs2}, [%rd4+128];
; CHECK-NEXT: max.u16 %rs3, %rs1, %rs2;
-; CHECK-NEXT: st.b8 [%rd5+129], %rs3;
+; CHECK-NEXT: st.b8 [%rd4+129], %rs3;
; CHECK-NEXT: ret;
%t0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !1
%t1 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
index afc7a39..aae2326 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
@@ -750,21 +750,25 @@ entry:
define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) {
; CHECK-64-LABEL: testDoubleImm1:
; CHECK-64: # %bb.0: # %entry
+; CHECK-64-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-64-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-64-NEXT: blr
;
; CHECK-32-LABEL: testDoubleImm1:
; CHECK-32: # %bb.0: # %entry
+; CHECK-32-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-32-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-32-NEXT: blr
;
; CHECK-64-P10-LABEL: testDoubleImm1:
; CHECK-64-P10: # %bb.0: # %entry
+; CHECK-64-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-64-P10-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-64-P10-NEXT: blr
;
; CHECK-32-P10-LABEL: testDoubleImm1:
; CHECK-32-P10: # %bb.0: # %entry
+; CHECK-32-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-32-P10-NEXT: xxpermdi 34, 1, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
index 9dd0fbe..10fc308 100644
--- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1757,7 +1757,11 @@ entry:
define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
; P9BE-LABEL: fromRegsConvdtoi:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9BE-NEXT: xxmrghd vs0, vs2, vs4
+; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xvcvdpsxws v2, vs0
; P9BE-NEXT: xxmrghd vs0, vs1, vs3
; P9BE-NEXT: xvcvdpsxws v3, vs0
@@ -1766,7 +1770,11 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P9LE-LABEL: fromRegsConvdtoi:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs3, vs1
+; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9LE-NEXT: xvcvdpsxws v2, vs0
; P9LE-NEXT: xxmrghd vs0, vs4, vs2
; P9LE-NEXT: xvcvdpsxws v3, vs0
@@ -1775,6 +1783,10 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P8BE-LABEL: fromRegsConvdtoi:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs2, vs4
; P8BE-NEXT: xxmrghd vs1, vs1, vs3
; P8BE-NEXT: xvcvdpsxws v2, vs0
@@ -1784,6 +1796,10 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) {
;
; P8LE-LABEL: fromRegsConvdtoi:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs3, vs1
; P8LE-NEXT: xxmrghd vs1, vs4, vs2
; P8LE-NEXT: xvcvdpsxws v2, vs0
@@ -3246,7 +3262,11 @@ entry:
define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) {
; P9BE-LABEL: fromRegsConvdtoui:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9BE-NEXT: xxmrghd vs0, vs2, vs4
+; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xvcvdpuxws v2, vs0
; P9BE-NEXT: xxmrghd vs0, vs1, vs3
; P9BE-NEXT: xvcvdpuxws v3, vs0
@@ -3255,7 +3275,11 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P9LE-LABEL: fromRegsConvdtoui:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs3, vs1
+; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
; P9LE-NEXT: xvcvdpuxws v2, vs0
; P9LE-NEXT: xxmrghd vs0, vs4, vs2
; P9LE-NEXT: xvcvdpuxws v3, vs0
@@ -3264,6 +3288,10 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P8BE-LABEL: fromRegsConvdtoui:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs2, vs4
; P8BE-NEXT: xxmrghd vs1, vs1, vs3
; P8BE-NEXT: xvcvdpuxws v2, vs0
@@ -3273,6 +3301,10 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d)
;
; P8LE-LABEL: fromRegsConvdtoui:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs3, vs1
; P8LE-NEXT: xxmrghd vs1, vs4, vs2
; P8LE-NEXT: xvcvdpuxws v2, vs0
@@ -4546,24 +4578,32 @@ entry:
define <2 x i64> @fromRegsConvdtoll(double %a, double %b) {
; P9BE-LABEL: fromRegsConvdtoll:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xxmrghd vs0, vs1, vs2
; P9BE-NEXT: xvcvdpsxds v2, vs0
; P9BE-NEXT: blr
;
; P9LE-LABEL: fromRegsConvdtoll:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs2, vs1
; P9LE-NEXT: xvcvdpsxds v2, vs0
; P9LE-NEXT: blr
;
; P8BE-LABEL: fromRegsConvdtoll:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs1, vs2
; P8BE-NEXT: xvcvdpsxds v2, vs0
; P8BE-NEXT: blr
;
; P8LE-LABEL: fromRegsConvdtoll:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs2, vs1
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
@@ -5700,24 +5740,32 @@ entry:
define <2 x i64> @fromRegsConvdtoull(double %a, double %b) {
; P9BE-LABEL: fromRegsConvdtoull:
; P9BE: # %bb.0: # %entry
+; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9BE-NEXT: xxmrghd vs0, vs1, vs2
; P9BE-NEXT: xvcvdpuxds v2, vs0
; P9BE-NEXT: blr
;
; P9LE-LABEL: fromRegsConvdtoull:
; P9LE: # %bb.0: # %entry
+; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9LE-NEXT: xxmrghd vs0, vs2, vs1
; P9LE-NEXT: xvcvdpuxds v2, vs0
; P9LE-NEXT: blr
;
; P8BE-LABEL: fromRegsConvdtoull:
; P8BE: # %bb.0: # %entry
+; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8BE-NEXT: xxmrghd vs0, vs1, vs2
; P8BE-NEXT: xvcvdpuxds v2, vs0
; P8BE-NEXT: blr
;
; P8LE-LABEL: fromRegsConvdtoull:
; P8LE: # %bb.0: # %entry
+; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8LE-NEXT: xxmrghd vs0, vs2, vs1
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
index 7f6fdc7..b40fbc3 100644
--- a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
+++ b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll
@@ -562,6 +562,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P8-NEXT: bl dummy
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: xxlxor f0, f0, f0
+; CHECK-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P8-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P8-NEXT: xxswapd vs0, vs0
; CHECK-P8-NEXT: stxvd2x vs0, 0, r30
@@ -576,6 +577,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P9-NEXT: bl dummy
; CHECK-P9-NEXT: nop
; CHECK-P9-NEXT: xxlxor f0, f0, f0
+; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P9-NEXT: stxv vs0, 0(r30)
;
@@ -589,6 +591,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P9-BE-NEXT: bl dummy
; CHECK-P9-BE-NEXT: nop
; CHECK-P9-BE-NEXT: xxlxor f0, f0, f0
+; CHECK-P9-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-BE-NEXT: xxmrghd vs0, vs0, vs1
; CHECK-P9-BE-NEXT: stxv vs0, 0(r30)
;
@@ -615,6 +618,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; CHECK-P7-NEXT: bl dummy
; CHECK-P7-NEXT: nop
; CHECK-P7-NEXT: xxlxor f0, f0, f0
+; CHECK-P7-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P7-NEXT: xxmrghd vs0, vs1, vs0
; CHECK-P7-NEXT: xxswapd vs0, vs0
; CHECK-P7-NEXT: stxvd2x vs0, 0, r30
@@ -629,6 +633,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; P8-AIX-64-NEXT: bl .dummy[PR]
; P8-AIX-64-NEXT: nop
; P8-AIX-64-NEXT: xxlxor f0, f0, f0
+; P8-AIX-64-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-AIX-64-NEXT: xxmrghd vs0, vs0, vs1
; P8-AIX-64-NEXT: stxvd2x vs0, 0, r31
;
@@ -642,6 +647,7 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea
; P8-AIX-32-NEXT: bl .dummy[PR]
; P8-AIX-32-NEXT: nop
; P8-AIX-32-NEXT: xxlxor f0, f0, f0
+; P8-AIX-32-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-AIX-32-NEXT: xxmrghd vs0, vs0, vs1
; P8-AIX-32-NEXT: stxvd2x vs0, 0, r31
test_entry:
diff --git a/llvm/test/CodeGen/PowerPC/combine-fneg.ll b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
index 04af094..a72abf7 100644
--- a/llvm/test/CodeGen/PowerPC/combine-fneg.ll
+++ b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
@@ -6,6 +6,7 @@ define <4 x double> @fneg_fdiv_splat(double %a0, <4 x double> %a1) {
; CHECK-LABEL: fneg_fdiv_splat:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxspltd 0, 1, 0
; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l
; CHECK-NEXT: xvredp 1, 0
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
index eac4fb6..4519cf4 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -229,6 +229,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P8-NEXT: xscvspdpn f1, vs0
; P8-NEXT: bl nearbyintf
; P8-NEXT: nop
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, vs1, v30
; P8-NEXT: xscvspdpn f1, v31
; P8-NEXT: xvcvdpsp v29, vs0
@@ -239,6 +240,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P8-NEXT: xscvspdpn f1, vs0
; P8-NEXT: bl nearbyintf
; P8-NEXT: nop
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, v30, vs1
; P8-NEXT: li r3, 160
; P8-NEXT: xvcvdpsp v2, vs0
@@ -276,6 +278,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P9-NEXT: xscvspdpn f1, vs0
; P9-NEXT: bl nearbyintf
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd vs0, vs1, v30
; P9-NEXT: xscvspdpn f1, v31
; P9-NEXT: xvcvdpsp v29, vs0
@@ -286,6 +289,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp
; P9-NEXT: xscvspdpn f1, vs0
; P9-NEXT: bl nearbyintf
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd vs0, v30, vs1
; P9-NEXT: lxv v31, 64(r1) # 16-byte Folded Reload
; P9-NEXT: lxv v30, 48(r1) # 16-byte Folded Reload
@@ -326,6 +330,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric
; P8-NEXT: bl nearbyint
; P8-NEXT: nop
; P8-NEXT: li r3, 144
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd v2, v30, vs1
; P8-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; P8-NEXT: li r3, 128
@@ -354,6 +359,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric
; P9-NEXT: xxswapd vs1, v31
; P9-NEXT: bl nearbyint
; P9-NEXT: nop
+; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P9-NEXT: xxmrghd v2, v30, vs1
; P9-NEXT: lxv v31, 48(r1) # 16-byte Folded Reload
; P9-NEXT: lxv v30, 32(r1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/frem.ll b/llvm/test/CodeGen/PowerPC/frem.ll
index 19b4b1c..21cb206 100644
--- a/llvm/test/CodeGen/PowerPC/frem.ll
+++ b/llvm/test/CodeGen/PowerPC/frem.ll
@@ -70,6 +70,7 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: xscvspdpn 2, 0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 0, 1, 61
; CHECK-NEXT: xscvspdpn 1, 62
; CHECK-NEXT: xscvspdpn 2, 63
@@ -83,6 +84,7 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: xscvspdpn 2, 0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 0, 61, 1
; CHECK-NEXT: lxv 63, 80(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 62, 64(1) # 16-byte Folded Reload
@@ -124,6 +126,7 @@ define <2 x double> @frem2x64(<2 x double> %a, <2 x double> %b) {
; CHECK-NEXT: xxswapd 2, 63
; CHECK-NEXT: bl fmod
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd 34, 61, 1
; CHECK-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
index 238e200..3ae0b02 100644
--- a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
@@ -41,39 +41,47 @@ define void @test(ptr %p1, ptr %p2) nounwind {
; CHECK-NEXT: xxswapd 61, 63
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 56, 1
; CHECK-NEXT: xxlor 1, 59, 59
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 60, 60
; CHECK-NEXT: xxmrgld 59, 0, 56
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 60, 1
; CHECK-NEXT: xxlor 1, 62, 62
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 61, 61
; CHECK-NEXT: xxmrgld 62, 0, 60
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 61, 1
; CHECK-NEXT: xxlor 1, 63, 63
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: xxlor 1, 57, 57
; CHECK-NEXT: xxmrgld 63, 0, 61
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 61, 1
; CHECK-NEXT: xxlor 1, 58, 58
; CHECK-NEXT: bl roundeven
; CHECK-NEXT: nop
; CHECK-NEXT: li 3, 160
; CHECK-NEXT: stxvd2x 63, 30, 29
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxswapd 0, 1
; CHECK-NEXT: stxvd2x 62, 30, 28
; CHECK-NEXT: stxvd2x 59, 30, 27
diff --git a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
index 50f05cc..b83ac4a 100644
--- a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
+++ b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
@@ -666,6 +666,7 @@ define <4 x float> @test_extend32_vec4(ptr %p) #0 {
; P8-NEXT: bl __extendhfsf2
; P8-NEXT: nop
; P8-NEXT: li r3, 80
+; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; P8-NEXT: xxmrghd vs0, vs61, vs1
; P8-NEXT: xxmrghd vs1, vs63, vs62
; P8-NEXT: ld r30, 96(r1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/ldexp.ll b/llvm/test/CodeGen/PowerPC/ldexp.ll
index 8d7253b..23748bc 100644
--- a/llvm/test/CodeGen/PowerPC/ldexp.ll
+++ b/llvm/test/CodeGen/PowerPC/ldexp.ll
@@ -107,6 +107,7 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind {
; CHECK-NEXT: extsw r4, r3
; CHECK-NEXT: bl ldexpf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd vs0, vs1, v29
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: vextuwrx r3, r3, v31
@@ -123,6 +124,7 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind {
; CHECK-NEXT: xscvspdpn f1, vs0
; CHECK-NEXT: bl ldexpf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd vs0, vs1, v29
; CHECK-NEXT: lxv v31, 80(r1) # 16-byte Folded Reload
; CHECK-NEXT: lxv v30, 64(r1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/llvm.modf.ll b/llvm/test/CodeGen/PowerPC/llvm.modf.ll
index 1b137c7..203b3bd 100644
--- a/llvm/test/CodeGen/PowerPC/llvm.modf.ll
+++ b/llvm/test/CodeGen/PowerPC/llvm.modf.ll
@@ -294,6 +294,7 @@ define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
; CHECK-NEXT: addi r4, r1, 40
; CHECK-NEXT: bl modf
; CHECK-NEXT: nop
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd v2, v30, vs1
; CHECK-NEXT: lfd f0, 32(r1)
; CHECK-NEXT: lfd f1, 40(r1)
diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index 291a9c1..b98aed8 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -940,21 +940,25 @@ entry:
define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) {
; CHECK-LABEL: testDoubleImm1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-NEXT: xxmrghd v2, v2, vs1
; CHECK-NEXT: blr
;
; CHECK-BE-LABEL: testDoubleImm1:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-BE-NEXT: xxpermdi v2, vs1, v2, 1
; CHECK-BE-NEXT: blr
;
; CHECK-P9-LABEL: testDoubleImm1:
; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; CHECK-P9-NEXT: xxpermdi v2, vs1, v2, 1
; CHECK-P9-NEXT: blr
;
; AIX-P8-LABEL: testDoubleImm1:
; AIX-P8: # %bb.0: # %entry
+; AIX-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1
; AIX-P8-NEXT: xxpermdi v2, vs1, v2, 1
; AIX-P8-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
index 71c3069..5dac21b 100644
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -107,6 +107,10 @@ entry:
define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsdivdp 3, 3, 6
@@ -116,6 +120,10 @@ define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fdiv_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsdivdp 3, 3, 6
@@ -209,6 +217,7 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double>
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -239,6 +248,7 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -390,6 +400,7 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -431,6 +442,7 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double>
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -486,6 +498,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl fmod
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -498,6 +511,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -536,6 +550,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -546,6 +561,7 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl fmod
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
@@ -670,6 +686,10 @@ entry:
define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fmul_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsmuldp 3, 3, 6
@@ -679,6 +699,10 @@ define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fmul_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsmuldp 3, 3, 6
@@ -820,6 +844,10 @@ entry:
define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fadd_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xsadddp 3, 3, 6
@@ -829,6 +857,10 @@ define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fadd_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xsadddp 3, 3, 6
@@ -970,6 +1002,10 @@ entry:
define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double> %y) #0 {
; PC64LE-LABEL: constrained_vector_fsub_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: xssubdp 3, 3, 6
@@ -979,6 +1015,10 @@ define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double>
;
; PC64LE9-LABEL: constrained_vector_fsub_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: xssubdp 3, 3, 6
@@ -1105,6 +1145,8 @@ entry:
define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xssqrtdp 3, 3
; PC64LE-NEXT: xvsqrtdp 2, 0
@@ -1113,6 +1155,8 @@ define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_sqrt_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xssqrtdp 3, 3
; PC64LE9-NEXT: xvsqrtdp 2, 0
@@ -1203,6 +1247,7 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double>
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -1233,6 +1278,7 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -1384,6 +1430,7 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -1425,6 +1472,7 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double>
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -1480,6 +1528,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl pow
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -1492,6 +1541,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -1530,6 +1580,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -1540,6 +1591,7 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double>
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl pow
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
@@ -1618,6 +1670,7 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: ld 30, 80(1) # 8-byte Folded Reload
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
@@ -1647,6 +1700,7 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -1790,6 +1844,7 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: mr 4, 30
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: mr 4, 30
@@ -1828,6 +1883,7 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: mr 4, 30
@@ -1878,6 +1934,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: mr 4, 30
; PC64LE-NEXT: bl __powidf2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: mr 4, 30
@@ -1890,6 +1947,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: ld 30, 96(1) # 8-byte Folded Reload
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
@@ -1923,6 +1981,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: mr 4, 30
@@ -1933,6 +1992,7 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 {
; PC64LE9-NEXT: mr 4, 30
; PC64LE9-NEXT: bl __powidf2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2003,6 +2063,7 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2027,6 +2088,7 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2149,6 +2211,7 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl sin
@@ -2181,6 +2244,7 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl sin
@@ -2224,6 +2288,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl sin
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl sin
@@ -2234,6 +2299,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2262,6 +2328,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl sin
@@ -2270,6 +2337,7 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl sin
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2338,6 +2406,7 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2362,6 +2431,7 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2484,6 +2554,7 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl cos
@@ -2516,6 +2587,7 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl cos
@@ -2559,6 +2631,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl cos
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl cos
@@ -2569,6 +2642,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2597,6 +2671,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl cos
@@ -2605,6 +2680,7 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl cos
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -2673,6 +2749,7 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -2697,6 +2774,7 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -2819,6 +2897,7 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl exp
@@ -2851,6 +2930,7 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl exp
@@ -2894,6 +2974,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl exp
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl exp
@@ -2904,6 +2985,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -2932,6 +3014,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl exp
@@ -2940,6 +3023,7 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3008,6 +3092,7 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3032,6 +3117,7 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3154,6 +3240,7 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl exp2
@@ -3186,6 +3273,7 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl exp2
@@ -3229,6 +3317,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl exp2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl exp2
@@ -3239,6 +3328,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3267,6 +3357,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl exp2
@@ -3275,6 +3366,7 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl exp2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3343,6 +3435,7 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3367,6 +3460,7 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3489,6 +3583,7 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log
@@ -3521,6 +3616,7 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log
@@ -3564,6 +3660,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log
@@ -3574,6 +3671,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3602,6 +3700,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log
@@ -3610,6 +3709,7 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -3678,6 +3778,7 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -3702,6 +3803,7 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -3824,6 +3926,7 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log10
@@ -3856,6 +3959,7 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log10
@@ -3899,6 +4003,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log10
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log10
@@ -3909,6 +4014,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -3937,6 +4043,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log10
@@ -3945,6 +4052,7 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log10
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4013,6 +4121,7 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -4037,6 +4146,7 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -4159,6 +4269,7 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl log2
@@ -4191,6 +4302,7 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl log2
@@ -4234,6 +4346,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl log2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl log2
@@ -4244,6 +4357,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -4272,6 +4386,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl log2
@@ -4280,6 +4395,7 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl log2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4387,6 +4503,8 @@ define <3 x float> @constrained_vector_rint_v3f32(<3 x float> %x) #0 {
define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_rint_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpic 3, 3
; PC64LE-NEXT: xvrdpic 2, 0
@@ -4395,6 +4513,8 @@ define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_rint_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpic 3, 3
; PC64LE9-NEXT: xvrdpic 2, 0
@@ -4479,6 +4599,7 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -4503,6 +4624,7 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -4625,6 +4747,7 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl nearbyint
@@ -4657,6 +4780,7 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl nearbyint
@@ -4700,6 +4824,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl nearbyint
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl nearbyint
@@ -4710,6 +4835,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -4738,6 +4864,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl nearbyint
@@ -4746,6 +4873,7 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl nearbyint
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -4927,6 +5055,10 @@ define <3 x double> @constrained_vector_max_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: mflr 0
; PC64LE-NEXT: stdu 1, -64(1)
; PC64LE-NEXT: li 3, 48
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: std 0, 80(1)
@@ -4950,6 +5082,10 @@ define <3 x double> @constrained_vector_max_v3f64(<3 x double> %x, <3 x double>
; PC64LE9: # %bb.0: # %entry
; PC64LE9-NEXT: mflr 0
; PC64LE9-NEXT: stdu 1, -48(1)
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: std 0, 64(1)
@@ -5159,6 +5295,10 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double>
; PC64LE-NEXT: mflr 0
; PC64LE-NEXT: stdu 1, -64(1)
; PC64LE-NEXT: li 3, 48
+; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 5, 4
; PC64LE-NEXT: xxmrghd 1, 2, 1
; PC64LE-NEXT: std 0, 80(1)
@@ -5182,6 +5322,10 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double>
; PC64LE9: # %bb.0: # %entry
; PC64LE9-NEXT: mflr 0
; PC64LE9-NEXT: stdu 1, -48(1)
+; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5
+; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 5, 4
; PC64LE9-NEXT: xxmrghd 1, 2, 1
; PC64LE9-NEXT: std 0, 64(1)
@@ -6520,6 +6664,8 @@ entry:
define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_ceil_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpip 3, 3
; PC64LE-NEXT: xvrdpip 2, 0
@@ -6528,6 +6674,8 @@ define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_ceil_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpip 3, 3
; PC64LE9-NEXT: xvrdpip 2, 0
@@ -6628,6 +6776,8 @@ entry:
define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_floor_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpim 3, 3
; PC64LE-NEXT: xvrdpim 2, 0
@@ -6636,6 +6786,8 @@ define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_floor_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpim 3, 3
; PC64LE9-NEXT: xvrdpim 2, 0
@@ -6736,6 +6888,8 @@ entry:
define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_round_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpi 3, 3
; PC64LE-NEXT: xvrdpi 2, 0
@@ -6744,6 +6898,8 @@ define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_round_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpi 3, 3
; PC64LE9-NEXT: xvrdpi 2, 0
@@ -6843,6 +6999,8 @@ entry:
define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
; PC64LE-LABEL: constrained_vector_trunc_v3f64:
; PC64LE: # %bb.0: # %entry
+; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 0, 2, 1
; PC64LE-NEXT: xsrdpiz 3, 3
; PC64LE-NEXT: xvrdpiz 2, 0
@@ -6851,6 +7009,8 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
;
; PC64LE9-LABEL: constrained_vector_trunc_v3f64:
; PC64LE9: # %bb.0: # %entry
+; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 0, 2, 1
; PC64LE9-NEXT: xsrdpiz 3, 3
; PC64LE9-NEXT: xvrdpiz 2, 0
@@ -8049,6 +8209,7 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 {
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 64
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 62, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 48
@@ -8073,6 +8234,7 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 62, 1
; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload
@@ -8195,6 +8357,7 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 {
; PC64LE-NEXT: fmr 1, 30
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: bl tan
@@ -8227,6 +8390,7 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 {
; PC64LE9-NEXT: fmr 1, 30
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: bl tan
@@ -8270,6 +8434,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: xxswapd 1, 62
; PC64LE-NEXT: bl tan
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 61, 1
; PC64LE-NEXT: xxlor 1, 63, 63
; PC64LE-NEXT: bl tan
@@ -8280,6 +8445,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -8308,6 +8474,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 62
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 61, 1
; PC64LE9-NEXT: xscpsgndp 1, 63, 63
; PC64LE9-NEXT: bl tan
@@ -8316,6 +8483,7 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 {
; PC64LE9-NEXT: xxswapd 1, 63
; PC64LE9-NEXT: bl tan
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 61, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
@@ -8390,6 +8558,7 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 80
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 34, 61, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 64
@@ -8420,6 +8589,7 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 34, 61, 1
; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload
; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload
@@ -8571,6 +8741,7 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double
; PC64LE-NEXT: fmr 2, 30
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 63, 1, 63
; PC64LE-NEXT: fmr 1, 29
; PC64LE-NEXT: fmr 2, 31
@@ -8612,6 +8783,7 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double
; PC64LE9-NEXT: fmr 2, 30
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 63, 1, 63
; PC64LE9-NEXT: fmr 1, 29
; PC64LE9-NEXT: fmr 2, 31
@@ -8667,6 +8839,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE-NEXT: xxswapd 2, 62
; PC64LE-NEXT: bl atan2
; PC64LE-NEXT: nop
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 62, 59, 1
; PC64LE-NEXT: xxlor 1, 61, 61
; PC64LE-NEXT: xxlor 2, 63, 63
@@ -8679,6 +8852,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE-NEXT: nop
; PC64LE-NEXT: li 3, 112
; PC64LE-NEXT: vmr 2, 30
+; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE-NEXT: xxmrghd 35, 60, 1
; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
; PC64LE-NEXT: li 3, 96
@@ -8717,6 +8891,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE9-NEXT: xxswapd 2, 62
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 62, 59, 1
; PC64LE9-NEXT: xscpsgndp 1, 61, 61
; PC64LE9-NEXT: xscpsgndp 2, 63, 63
@@ -8727,6 +8902,7 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double
; PC64LE9-NEXT: xxswapd 2, 63
; PC64LE9-NEXT: bl atan2
; PC64LE9-NEXT: nop
+; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1
; PC64LE9-NEXT: xxmrghd 35, 60, 1
; PC64LE9-NEXT: vmr 2, 30
; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
index 57d4c48..b41220b 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-and.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluate instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_xor_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_xor_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_xor_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_xor_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_xor_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_xor_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 22
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -88,11 +80,9 @@ define <4 x i32> @ternary_A_nor_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -107,12 +97,10 @@ define <2 x i64> @ternary_A_nor_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -127,11 +115,9 @@ define <16 x i8> @ternary_A_nor_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nor_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -146,11 +132,9 @@ define <8 x i16> @ternary_A_nor_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nor_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 24
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -165,11 +149,9 @@ define <4 x i32> @ternary_A_eqv_BC_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -184,12 +166,10 @@ define <2 x i64> @ternary_A_eqv_BC_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -204,11 +184,9 @@ define <16 x i8> @ternary_A_eqv_BC_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -223,11 +201,9 @@ define <8 x i16> @ternary_A_eqv_BC_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_eqv_BC_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 25
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -242,11 +218,9 @@ define <4 x i32> @ternary_A_not_C_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_C_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -260,12 +234,10 @@ define <2 x i64> @ternary_A_not_C_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_C_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation
@@ -279,11 +251,9 @@ define <16 x i8> @ternary_A_not_C_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_C_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -297,11 +267,9 @@ define <8 x i16> @ternary_A_not_C_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_C_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 26
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -315,11 +283,9 @@ define <4 x i32> @ternary_A_not_B_and_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_B_and_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -333,12 +299,10 @@ define <2 x i64> @ternary_A_not_B_and_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_B_and_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation
@@ -352,11 +316,9 @@ define <16 x i8> @ternary_A_not_B_and_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_B_and_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -370,11 +332,9 @@ define <8 x i16> @ternary_A_not_B_and_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_B_and_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxland vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 28
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 3fcaa81..3225120 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -1302,14 +1302,14 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
; RV32I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32I-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.agg.result)
; RV32I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; RV32I-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C4]](s32)
- ; RV32I-NEXT: G_STORE [[C1]](s32), %3(p0) :: (store (s32) into %ir.b)
+ ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; RV32I-NEXT: G_STORE [[C1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.b)
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; RV32I-NEXT: %6:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C5]](s32)
- ; RV32I-NEXT: G_STORE [[C2]](s32), %6(p0) :: (store (s32) into %ir.c)
+ ; RV32I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; RV32I-NEXT: G_STORE [[C2]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %ir.c)
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; RV32I-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C6]](s32)
- ; RV32I-NEXT: G_STORE [[C3]](s32), %9(p0) :: (store (s32) into %ir.d)
+ ; RV32I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; RV32I-NEXT: G_STORE [[C3]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %ir.d)
; RV32I-NEXT: PseudoRET
store i32 1, ptr %agg.result, align 4
%b = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 1
@@ -1331,8 +1331,8 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
- ; ILP32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %3(p0) :: (dereferenceable load (s32) from %ir.3)
+ ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.3)
; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; ILP32-NEXT: $x10 = COPY [[ADD]](s32)
; ILP32-NEXT: PseudoRET implicit $x10
@@ -1346,8 +1346,8 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32F-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
- ; ILP32F-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %3(p0) :: (dereferenceable load (s32) from %ir.3)
+ ; ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32F-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.3)
; ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; ILP32F-NEXT: $x10 = COPY [[ADD]](s32)
; ILP32F-NEXT: PseudoRET implicit $x10
@@ -1361,8 +1361,8 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32D-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
- ; ILP32D-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %3(p0) :: (dereferenceable load (s32) from %ir.3)
+ ; ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32D-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.3)
; ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; ILP32D-NEXT: $x10 = COPY [[ADD]](s32)
; ILP32D-NEXT: PseudoRET implicit $x10
@@ -1392,13 +1392,13 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV32I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32I-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32), align 8)
; RV32I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; RV32I-NEXT: G_STORE [[C1]](s32), [[PTR_ADD]](p0) :: (store (s32))
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; RV32I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s32)
+ ; RV32I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s32)
; RV32I-NEXT: G_STORE [[C2]](s16), [[PTR_ADD1]](p0) :: (store (s16), align 8)
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; RV32I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s32)
+ ; RV32I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s32)
; RV32I-NEXT: G_STORE [[C3]](s32), [[PTR_ADD2]](p0) :: (store (s32))
; RV32I-NEXT: PseudoRET
%a = insertvalue %struct.large2 poison, i32 1, 0
@@ -1418,13 +1418,13 @@ define i32 @caller_large_struct_ret2() nounwind {
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.0, align 8)
; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; ILP32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %stack.0)
; ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
; ILP32-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from %stack.0, align 8)
; ILP32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+ ; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
; ILP32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %stack.0)
; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD3]]
; ILP32-NEXT: $x10 = COPY [[ADD]](s32)
@@ -1439,13 +1439,13 @@ define i32 @caller_large_struct_ret2() nounwind {
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.0, align 8)
; ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; ILP32F-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %stack.0)
; ILP32F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
; ILP32F-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from %stack.0, align 8)
; ILP32F-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+ ; ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
; ILP32F-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %stack.0)
; ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD3]]
; ILP32F-NEXT: $x10 = COPY [[ADD]](s32)
@@ -1460,13 +1460,13 @@ define i32 @caller_large_struct_ret2() nounwind {
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.0, align 8)
; ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+ ; ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; ILP32D-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %stack.0)
; ILP32D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
+ ; ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s32)
; ILP32D-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD1]](p0) :: (load (s16) from %stack.0, align 8)
; ILP32D-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+ ; ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
; ILP32D-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %stack.0)
; ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD3]]
; ILP32D-NEXT: $x10 = COPY [[ADD]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
index 17c6e55..a297358 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -1075,14 +1075,14 @@ define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; RV64I-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.agg.result, align 4)
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; RV64I-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C4]](s64)
- ; RV64I-NEXT: G_STORE [[C1]](s64), %3(p0) :: (store (s64) into %ir.b, align 4)
+ ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; RV64I-NEXT: G_STORE [[C1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.b, align 4)
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; RV64I-NEXT: %6:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C5]](s64)
- ; RV64I-NEXT: G_STORE [[C2]](s64), %6(p0) :: (store (s64) into %ir.c, align 4)
+ ; RV64I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; RV64I-NEXT: G_STORE [[C2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %ir.c, align 4)
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; RV64I-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C6]](s64)
- ; RV64I-NEXT: G_STORE [[C3]](s64), %9(p0) :: (store (s64) into %ir.d, align 4)
+ ; RV64I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; RV64I-NEXT: G_STORE [[C3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %ir.d, align 4)
; RV64I-NEXT: PseudoRET
store i64 1, ptr %agg.result, align 4
%b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
@@ -1104,8 +1104,8 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; LP64-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
- ; LP64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
+ ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.3)
; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64-NEXT: $x10 = COPY [[ADD]](s64)
; LP64-NEXT: PseudoRET implicit $x10
@@ -1119,8 +1119,8 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; LP64F-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
- ; LP64F-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
+ ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64F-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.3)
; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64F-NEXT: $x10 = COPY [[ADD]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
@@ -1134,8 +1134,8 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; LP64D-NEXT: %3:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
- ; LP64D-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64) from %ir.3)
+ ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64D-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.3)
; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
; LP64D-NEXT: $x10 = COPY [[ADD]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
@@ -1165,13 +1165,13 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; RV64I-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64), align 16)
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; RV64I-NEXT: G_STORE [[C1]](s128), [[PTR_ADD]](p0) :: (store (s128))
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; RV64I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+ ; RV64I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64)
; RV64I-NEXT: G_STORE [[C2]](s64), [[PTR_ADD1]](p0) :: (store (s64), align 16)
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; RV64I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+ ; RV64I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64)
; RV64I-NEXT: G_STORE [[C3]](s64), [[PTR_ADD2]](p0) :: (store (s64))
; RV64I-NEXT: PseudoRET
%a = insertvalue %struct.large2 poison, i64 1, 0
@@ -1191,13 +1191,13 @@ define i64 @caller_large_struct_ret2() nounwind {
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0, align 16)
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %stack.0)
; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; LP64-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0, align 16)
; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
+ ; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
; LP64-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0)
; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD3]]
; LP64-NEXT: $x10 = COPY [[ADD]](s64)
@@ -1212,13 +1212,13 @@ define i64 @caller_large_struct_ret2() nounwind {
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0, align 16)
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64F-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %stack.0)
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; LP64F-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0, align 16)
; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
+ ; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
; LP64F-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0)
; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD3]]
; LP64F-NEXT: $x10 = COPY [[ADD]](s64)
@@ -1233,13 +1233,13 @@ define i64 @caller_large_struct_ret2() nounwind {
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0, align 16)
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
- ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+ ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64D-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %stack.0)
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; LP64D-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0, align 16)
; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
- ; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
+ ; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64)
; LP64D-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0)
; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD3]]
; LP64D-NEXT: $x10 = COPY [[ADD]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
index 3b12ad5..e985d1f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -67,7 +67,7 @@ define i32 @va1(ptr %fmt, ...) {
; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; RV32-NEXT: %20:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s32)
+ ; RV32-NEXT: %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
; RV32-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
@@ -105,7 +105,7 @@ define i32 @va1(ptr %fmt, ...) {
; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; RV64-NEXT: %20:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s64)
+ ; RV64-NEXT: %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
; RV64-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
@@ -687,7 +687,7 @@ define i64 @va2(ptr %fmt, ...) nounwind {
; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; RV32-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
+ ; RV32-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
; RV32-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
@@ -733,7 +733,7 @@ define i64 @va2(ptr %fmt, ...) nounwind {
; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; RV64-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
+ ; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
@@ -974,7 +974,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; RV32-NEXT: %24:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
+ ; RV32-NEXT: %24:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
; RV32-NEXT: G_STORE %24(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
@@ -1020,7 +1020,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; RV64-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
+ ; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
@@ -1724,7 +1724,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV32-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; RV32-NEXT: %21:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s32)
+ ; RV32-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
; RV32-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
@@ -1763,7 +1763,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; RV64-NEXT: %21:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s64)
+ ; RV64-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
; RV64-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv32.mir
index 8081cfb..e93f82a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv32.mir
@@ -1545,21 +1545,21 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from unknown-address + 8, align 8)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from unknown-address + 12)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p0) :: (load (s32), align 8)
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s32)
; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load (s32) from unknown-address + 4)
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s32)
; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from unknown-address + 8, align 8)
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s32)
; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from unknown-address + 12)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[LOAD]](s32), [[LOAD4]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[LOAD1]](s32), [[LOAD5]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
index 93b145c..9d2b6c1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
@@ -147,7 +147,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
; CHECK-NEXT: $x11 = COPY [[LOAD1]](s32)
@@ -159,7 +159,7 @@ body: |
; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
@@ -232,7 +232,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
@@ -278,15 +278,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -331,7 +331,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
@@ -376,15 +376,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD1]], [[C1]](s32)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s32)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -392,15 +392,15 @@ body: |
; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[OR1]], [[C3]](s32)
; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s32)
; CHECK-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; CHECK-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXTLOAD4]], [[C1]](s32)
; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[ZEXTLOAD3]]
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
; CHECK-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C1]](s32)
; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[SHL4]], [[ZEXTLOAD5]]
@@ -416,7 +416,7 @@ body: |
; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s32)
; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4, align 1)
; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
index d85d2c5..06e84fd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
@@ -188,7 +188,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
; CHECK-NEXT: $x10 = COPY [[LOAD]](s64)
; CHECK-NEXT: $x11 = COPY [[LOAD1]](s64)
@@ -200,7 +200,7 @@ body: |
; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
@@ -273,7 +273,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s16)
@@ -320,15 +320,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD1]], [[C1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s64)
@@ -377,7 +377,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
@@ -423,15 +423,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD1]], [[C1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD3]], [[C1]](s64)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -439,15 +439,15 @@ body: |
; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C3]](s64)
; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[OR]]
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD5]], [[C1]](s64)
; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SHL3]], [[ZEXTLOAD4]]
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[LOAD]], [[C1]](s64)
; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[ZEXTLOAD6]]
@@ -494,15 +494,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD1]], [[C1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s16) from unknown-address + 4)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s16) from unknown-address + 6)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[LOAD]], [[C1]](s64)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -549,15 +549,15 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD1]], [[C1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD3:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD3]], [[C1]](s64)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[ZEXTLOAD2]]
@@ -565,15 +565,15 @@ body: |
; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C3]](s64)
; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[OR]]
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK-NEXT: [[ZEXTLOAD4:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD3]](p0) :: (load (s8) from unknown-address + 4)
- ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD5:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD4]](p0) :: (load (s8) from unknown-address + 5)
; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD5]], [[C1]](s64)
; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SHL3]], [[ZEXTLOAD4]]
- ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD6:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD5]](p0) :: (load (s8) from unknown-address + 6)
- ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s8) from unknown-address + 7)
; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[LOAD]], [[C1]](s64)
; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[ZEXTLOAD6]]
@@ -582,29 +582,29 @@ body: |
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[OR5]], [[C5]](s64)
; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[OR2]]
- ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK-NEXT: [[ZEXTLOAD7:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD7]](p0) :: (load (s8) from unknown-address + 8)
- ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD8:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD8]](p0) :: (load (s8) from unknown-address + 9)
; CHECK-NEXT: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD8]], [[C1]](s64)
; CHECK-NEXT: [[OR7:%[0-9]+]]:_(s64) = G_OR [[SHL7]], [[ZEXTLOAD7]]
- ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD9:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD9]](p0) :: (load (s8) from unknown-address + 10)
- ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD9]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD10:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD10]](p0) :: (load (s8) from unknown-address + 11)
; CHECK-NEXT: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD10]], [[C1]](s64)
; CHECK-NEXT: [[OR8:%[0-9]+]]:_(s64) = G_OR [[SHL8]], [[ZEXTLOAD9]]
; CHECK-NEXT: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[OR8]], [[C3]](s64)
; CHECK-NEXT: [[OR9:%[0-9]+]]:_(s64) = G_OR [[SHL9]], [[OR7]]
- ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
; CHECK-NEXT: [[ZEXTLOAD11:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD11]](p0) :: (load (s8) from unknown-address + 12)
- ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C]](s64)
; CHECK-NEXT: [[ZEXTLOAD12:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD12]](p0) :: (load (s8) from unknown-address + 13)
; CHECK-NEXT: [[SHL10:%[0-9]+]]:_(s64) = G_SHL [[ZEXTLOAD12]], [[C1]](s64)
; CHECK-NEXT: [[OR10:%[0-9]+]]:_(s64) = G_OR [[SHL10]], [[ZEXTLOAD11]]
- ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
; CHECK-NEXT: [[ZEXTLOAD13:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD13]](p0) :: (load (s8) from unknown-address + 14)
- ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
+ ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD13]], [[C]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD14]](p0) :: (load (s8) from unknown-address + 15)
; CHECK-NEXT: [[SHL11:%[0-9]+]]:_(s64) = G_SHL [[LOAD1]], [[C1]](s64)
; CHECK-NEXT: [[OR11:%[0-9]+]]:_(s64) = G_OR [[SHL11]], [[ZEXTLOAD13]]
@@ -622,7 +622,7 @@ body: |
; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 1)
; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+ ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64)
; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8, align 1)
; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
index 5a7a042..cb5db22 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
@@ -149,7 +149,7 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32), align 8)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s32)
; CHECK-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: PseudoRET
;
@@ -161,7 +161,7 @@ body: |
; UNALIGNED-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32), align 8)
; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
+ ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s32)
; UNALIGNED-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; UNALIGNED-NEXT: PseudoRET
%2:_(s32) = COPY $x10
@@ -239,7 +239,7 @@ body: |
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C]](s32)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s32)
; CHECK-NEXT: G_STORE [[COPY2]](s16), [[COPY1]](p0) :: (store (s8))
; CHECK-NEXT: G_STORE [[TRUNC1]](s16), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
; CHECK-NEXT: PseudoRET
@@ -284,7 +284,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s32)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
@@ -292,14 +292,14 @@ body: |
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s32)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s32)
; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY1]](p0) :: (store (s8))
; CHECK-NEXT: G_STORE [[TRUNC1]](s16), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 1)
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[C5]](s32)
; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C4]](s32)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s32)
; CHECK-NEXT: G_STORE [[TRUNC2]](s16), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
; CHECK-NEXT: G_STORE [[TRUNC3]](s16), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
; CHECK-NEXT: PseudoRET
@@ -342,7 +342,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s32)
; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
; CHECK-NEXT: PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
index 8704dde..7c1ede0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
@@ -268,7 +268,7 @@ body: |
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[COPY2]](s16), [[COPY1]](p0) :: (store (s8))
; CHECK-NEXT: G_STORE [[TRUNC1]](s16), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
; CHECK-NEXT: PseudoRET
@@ -315,7 +315,7 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
@@ -323,7 +323,7 @@ body: |
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C3]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s64)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY1]](p0) :: (store (s8))
; CHECK-NEXT: G_STORE [[TRUNC1]](s16), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 1)
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s64)
@@ -331,7 +331,7 @@ body: |
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C4]]
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C6]](s64)
; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s64)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
; CHECK-NEXT: G_STORE [[TRUNC2]](s16), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
; CHECK-NEXT: G_STORE [[TRUNC3]](s16), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
; CHECK-NEXT: PseudoRET
@@ -381,7 +381,7 @@ body: |
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
; CHECK-NEXT: PseudoRET
@@ -426,7 +426,7 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY2]], [[C]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
@@ -434,14 +434,14 @@ body: |
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C2]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 2)
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[C5]](s64)
; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR2]](s64)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
; CHECK-NEXT: G_STORE [[TRUNC2]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4)
; CHECK-NEXT: G_STORE [[TRUNC3]](s32), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 6)
; CHECK-NEXT: PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..34493ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,33 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`.
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
new file mode 100644
index 0000000..08340bb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
@@ -0,0 +1,449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+e -target-abi ilp32e < %s | FileCheck %s -check-prefix=RV32E
+; RUN: llc -mtriple=riscv64 -mattr=+e -target-abi lp64e < %s | FileCheck %s -check-prefix=RV64E
+
+; Check that the PreserveMost calling convention works.
+
+declare void @standard_cc_func()
+declare preserve_mostcc void @preserve_mostcc_func()
+
+define preserve_mostcc void @preserve_mostcc1() nounwind {
+; RV32I-LABEL: preserve_mostcc1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a1, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a2, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a3, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a4, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a5, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a6, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a7, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t4, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t5, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t6, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: call standard_cc_func
+; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a1, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a2, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a3, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a5, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a6, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a7, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t4, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t5, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t6, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: preserve_mostcc1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: addi sp, sp, -112
+; RV64I-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t0, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: call standard_cc_func
+; RV64I-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t0, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 112
+; RV64I-NEXT: ret
+;
+; RV32E-LABEL: preserve_mostcc1:
+; RV32E: # %bb.0: # %entry
+; RV32E-NEXT: addi sp, sp, -32
+; RV32E-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw t0, 24(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a1, 16(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a4, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw a5, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT: call standard_cc_func
+; RV32E-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw t0, 24(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a1, 16(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a2, 12(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a3, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a4, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw a5, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT: addi sp, sp, 32
+; RV32E-NEXT: ret
+;
+; RV64E-LABEL: preserve_mostcc1:
+; RV64E: # %bb.0: # %entry
+; RV64E-NEXT: addi sp, sp, -64
+; RV64E-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd t0, 48(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a1, 32(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a2, 24(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a4, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd a5, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT: call standard_cc_func
+; RV64E-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld t0, 48(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a1, 32(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a2, 24(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a3, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a4, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld a5, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT: addi sp, sp, 64
+; RV64E-NEXT: ret
+entry:
+ call void @standard_cc_func()
+ ret void
+}
+
+define preserve_mostcc void @preserve_mostcc2() nounwind {
+; RV32I-LABEL: preserve_mostcc2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: call preserve_mostcc_func
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: preserve_mostcc2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: call preserve_mostcc_func
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV32E-LABEL: preserve_mostcc2:
+; RV32E: # %bb.0:
+; RV32E-NEXT: addi sp, sp, -4
+; RV32E-NEXT: sw ra, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT: call preserve_mostcc_func
+; RV32E-NEXT: lw ra, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT: addi sp, sp, 4
+; RV32E-NEXT: ret
+;
+; RV64E-LABEL: preserve_mostcc2:
+; RV64E: # %bb.0:
+; RV64E-NEXT: addi sp, sp, -8
+; RV64E-NEXT: sd ra, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT: call preserve_mostcc_func
+; RV64E-NEXT: ld ra, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT: addi sp, sp, 8
+; RV64E-NEXT: ret
+ call preserve_mostcc void @preserve_mostcc_func()
+ ret void
+}
+
+; X6, X7 and X28 will be saved to registers.
+define void @preserve_mostcc3() nounwind {
+; RV32I-LABEL: preserve_mostcc3:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv a0, t1
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv a1, t2
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv a2, t3
+; RV32I-NEXT: call preserve_mostcc_func
+; RV32I-NEXT: mv t1, a0
+; RV32I-NEXT: mv t2, a1
+; RV32I-NEXT: mv t3, a2
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: preserve_mostcc3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv a0, t1
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv a1, t2
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv a2, t3
+; RV64I-NEXT: call preserve_mostcc_func
+; RV64I-NEXT: mv t1, a0
+; RV64I-NEXT: mv t2, a1
+; RV64I-NEXT: mv t3, a2
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
+; RV32E-LABEL: preserve_mostcc3:
+; RV32E: # %bb.0:
+; RV32E-NEXT: addi sp, sp, -12
+; RV32E-NEXT: sw ra, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw s0, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw s1, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: mv a0, t1
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: mv a1, t2
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: mv a2, t3
+; RV32E-NEXT: call preserve_mostcc_func
+; RV32E-NEXT: mv t1, a0
+; RV32E-NEXT: mv t2, a1
+; RV32E-NEXT: mv t3, a2
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: lw ra, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw s0, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw s1, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT: addi sp, sp, 12
+; RV32E-NEXT: ret
+;
+; RV64E-LABEL: preserve_mostcc3:
+; RV64E: # %bb.0:
+; RV64E-NEXT: addi sp, sp, -24
+; RV64E-NEXT: sd ra, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: mv a0, t1
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: mv a1, t2
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: mv a2, t3
+; RV64E-NEXT: call preserve_mostcc_func
+; RV64E-NEXT: mv t1, a0
+; RV64E-NEXT: mv t2, a1
+; RV64E-NEXT: mv t3, a2
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: ld ra, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT: addi sp, sp, 24
+; RV64E-NEXT: ret
+ %1 = call i32 asm sideeffect "", "={x6}"() nounwind
+ %2 = call i32 asm sideeffect "", "={x7}"() nounwind
+ %3 = call i32 asm sideeffect "", "={x8}"() nounwind
+ %4 = call i32 asm sideeffect "", "={x9}"() nounwind
+ %5 = call i32 asm sideeffect "", "={x28}"() nounwind
+ call preserve_mostcc void @preserve_mostcc_func()
+ call void asm sideeffect "", "{x6},{x7},{x8},{x9},{x28}"(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5)
+ ret void
+}
+
+; X6, X7 and X28 will be saved to the stack.
+define void @preserve_mostcc4() nounwind {
+; RV32I-LABEL: preserve_mostcc4:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv s2, t1
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv s3, t2
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: mv s4, t3
+; RV32I-NEXT: call standard_cc_func
+; RV32I-NEXT: mv t1, s2
+; RV32I-NEXT: mv t2, s3
+; RV32I-NEXT: mv t3, s4
+; RV32I-NEXT: #APP
+; RV32I-NEXT: #NO_APP
+; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: preserve_mostcc4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -48
+; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv s2, t1
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv s3, t2
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: mv s4, t3
+; RV64I-NEXT: call standard_cc_func
+; RV64I-NEXT: mv t1, s2
+; RV64I-NEXT: mv t2, s3
+; RV64I-NEXT: mv t3, s4
+; RV64I-NEXT: #APP
+; RV64I-NEXT: #NO_APP
+; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 48
+; RV64I-NEXT: ret
+;
+; RV32E-LABEL: preserve_mostcc4:
+; RV32E: # %bb.0:
+; RV32E-NEXT: addi sp, sp, -24
+; RV32E-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32E-NEXT: sw s1, 12(sp) # 4-byte Folded Spill
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: sw t1, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: sw t2, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: sw t3, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT: call standard_cc_func
+; RV32E-NEXT: lw t1, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw t2, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw t3, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT: #APP
+; RV32E-NEXT: #NO_APP
+; RV32E-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32E-NEXT: lw s1, 12(sp) # 4-byte Folded Reload
+; RV32E-NEXT: addi sp, sp, 24
+; RV32E-NEXT: ret
+;
+; RV64E-LABEL: preserve_mostcc4:
+; RV64E: # %bb.0:
+; RV64E-NEXT: addi sp, sp, -48
+; RV64E-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64E-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: sd t1, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: sd t2, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: sd t3, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT: call standard_cc_func
+; RV64E-NEXT: ld t1, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld t2, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld t3, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT: #APP
+; RV64E-NEXT: #NO_APP
+; RV64E-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64E-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64E-NEXT: addi sp, sp, 48
+; RV64E-NEXT: ret
+ %1 = call i32 asm sideeffect "", "={x6}"() nounwind
+ %2 = call i32 asm sideeffect "", "={x7}"() nounwind
+ %3 = call i32 asm sideeffect "", "={x8}"() nounwind
+ %4 = call i32 asm sideeffect "", "={x9}"() nounwind
+ %5 = call i32 asm sideeffect "", "={x28}"() nounwind
+ call void @standard_cc_func()
+ call void asm sideeffect "", "{x6},{x7},{x8},{x9},{x28}"(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..6e1fe92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type operand bundle.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..1f91f41
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
@@ -0,0 +1,21 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type operand bundle.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 1263892..4091524 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -684,13 +684,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -698,25 +698,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -724,8 +724,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -733,7 +733,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -741,12 +741,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -754,8 +754,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -767,7 +767,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -779,13 +779,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -793,8 +793,8 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 12(a0)
; RV32-NEXT: sb zero, 13(a0)
@@ -814,7 +814,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 12(a0)
; RV64-NEXT: sb zero, 13(a0)
@@ -834,7 +834,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
@@ -842,7 +842,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -851,8 +851,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 28(a0)
; RV32-NEXT: sb zero, 29(a0)
@@ -888,7 +888,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 28(a0)
; RV64-NEXT: sb zero, 29(a0)
@@ -924,7 +924,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 16(a0)
; RV32-FAST-NEXT: sw zero, 20(a0)
@@ -936,7 +936,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -947,8 +947,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 60(a0)
; RV32-NEXT: sb zero, 61(a0)
@@ -1016,7 +1016,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 60(a0)
; RV64-NEXT: sb zero, 61(a0)
@@ -1084,7 +1084,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 48(a0)
; RV32-FAST-NEXT: sw zero, 52(a0)
@@ -1104,7 +1104,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 32(a0)
; RV64-FAST-NEXT: sd zero, 40(a0)
@@ -1121,13 +1121,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1135,13 +1135,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1149,14 +1149,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1165,8 +1165,8 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
@@ -1174,7 +1174,7 @@ define void @aligned_bzero_16(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1183,8 +1183,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 16(a0)
; RV32-BOTH-NEXT: sw zero, 20(a0)
@@ -1196,7 +1196,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1207,8 +1207,8 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 48(a0)
; RV32-BOTH-NEXT: sw zero, 52(a0)
@@ -1228,7 +1228,7 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 32(a0)
; RV64-BOTH-NEXT: sd zero, 40(a0)
@@ -1247,28 +1247,28 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
; Usual overlap tricks
-define void @aligned_bzero_7(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_7:
+define void @aligned_memset_zero_7(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_7:
; RV32: # %bb.0:
; RV32-NEXT: sw zero, 0(a0)
; RV32-NEXT: sh zero, 4(a0)
; RV32-NEXT: sb zero, 6(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_7:
+; RV64-LABEL: aligned_memset_zero_7:
; RV64: # %bb.0:
; RV64-NEXT: sw zero, 0(a0)
; RV64-NEXT: sh zero, 4(a0)
; RV64-NEXT: sb zero, 6(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_7:
+; RV32-FAST-LABEL: aligned_memset_zero_7:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 3(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_7:
+; RV64-FAST-LABEL: aligned_memset_zero_7:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 3(a0)
; RV64-FAST-NEXT: sw zero, 0(a0)
@@ -1277,8 +1277,8 @@ define void @aligned_bzero_7(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_15(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_15:
+define void @aligned_memset_zero_15(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_15:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 14(a0)
; RV32-NEXT: sw zero, 0(a0)
@@ -1287,7 +1287,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-NEXT: sh zero, 12(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_15:
+; RV64-LABEL: aligned_memset_zero_15:
; RV64: # %bb.0:
; RV64-NEXT: sd zero, 0(a0)
; RV64-NEXT: sw zero, 8(a0)
@@ -1295,7 +1295,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV64-NEXT: sb zero, 14(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_15:
+; RV32-FAST-LABEL: aligned_memset_zero_15:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 11(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
@@ -1303,7 +1303,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 8(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_15:
+; RV64-FAST-LABEL: aligned_memset_zero_15:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 7(a0)
; RV64-FAST-NEXT: sd zero, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 5747bbb..bd37443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -554,9 +554,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT: vmv.v.i v10, 0
; VLA-NEXT: vmv1r.v v0, v8
-; VLA-NEXT: vmerge.vim v8, v10, 1, v0
; VLA-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vmerge.vim v9, v10, 1, v0
; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT: vmsne.vi v0, v9, 0
; VLA-NEXT: ret
@@ -568,9 +567,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLS-NEXT: vmv.v.i v9, 0
; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vmv1r.v v0, v8
-; VLS-NEXT: vmerge.vim v8, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; VLS-NEXT: vmsne.vi v0, v10, 0
; VLS-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 9694912..6eb0b69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -1883,7 +1883,8 @@ define void @store_factor4_one_active_slidedown(ptr %ptr, <4 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsseg4e32.v v8, (a0)
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsse32.v v8, (a0), a1
; CHECK-NEXT: ret
%v0 = shufflevector <4 x i32> %v, <4 x i32> poison, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 undef, i32 undef>
store <16 x i32> %v0, ptr %ptr
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 8963940..2c11bd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -360,13 +360,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -374,25 +374,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -400,8 +400,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -409,7 +409,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -417,12 +417,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -430,8 +430,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -443,7 +443,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -455,13 +455,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -469,29 +469,29 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -501,8 +501,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
@@ -511,7 +511,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
@@ -520,7 +520,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
@@ -529,7 +529,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -541,8 +541,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: li a1, 64
; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -550,7 +550,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: li a1, 64
; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -558,14 +558,14 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -577,13 +577,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -591,13 +591,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -605,14 +605,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -621,15 +621,15 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -639,8 +639,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -649,7 +649,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -661,15 +661,15 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -679,8 +679,8 @@ define void @aligned_bzero_64(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_66(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_66:
+define void @aligned_memset_zero_66(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_66:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 64(a0)
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -688,7 +688,7 @@ define void @aligned_bzero_66(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_66:
+; RV64-BOTH-LABEL: aligned_memset_zero_66:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 64(a0)
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -699,8 +699,8 @@ define void @aligned_bzero_66(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_96(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_96:
+define void @aligned_memset_zero_96(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_96:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -713,7 +713,7 @@ define void @aligned_bzero_96(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_96:
+; RV64-BOTH-LABEL: aligned_memset_zero_96:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -729,15 +729,15 @@ define void @aligned_bzero_96(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_128(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_128:
+define void @aligned_memset_zero_128(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_128:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_128:
+; RV64-BOTH-LABEL: aligned_memset_zero_128:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -747,8 +747,8 @@ define void @aligned_bzero_128(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_256(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_256:
+define void @aligned_memset_zero_256(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_256:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -757,7 +757,7 @@ define void @aligned_bzero_256(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_256:
+; RV64-BOTH-LABEL: aligned_memset_zero_256:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr141907.ll b/llvm/test/CodeGen/RISCV/rvv/pr141907.ll
index 648b47d..f93f88a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr141907.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr141907.ll
@@ -9,27 +9,29 @@ define void @pr141907(ptr %0) nounwind {
; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: li a1, 0
-; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: addi a3, sp, 20
+; CHECK-NEXT: li a4, 12
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vs4r.v v8, (a2)
; CHECK-NEXT: vsetvli a1, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma
-; CHECK-NEXT: vnsrl.wi v11, v9, 0, v0.t
-; CHECK-NEXT: vsetvli a3, zero, e32, m1, ta, ma
-; CHECK-NEXT: vlseg3e32.v v8, (a2)
+; CHECK-NEXT: vnsrl.wi v9, v8, 0, v0.t
+; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a3), a4
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma
-; CHECK-NEXT: vsseg2e16.v v11, (zero)
+; CHECK-NEXT: vsseg2e16.v v9, (zero)
; CHECK-NEXT: bnez a1, .LBB0_1
; CHECK-NEXT: .LBB0_2: # %while.body5
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vse16.v v9, (a0)
+; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: j .LBB0_2
entry:
br label %vector.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index fba592d..c4284bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -407,8 +407,9 @@ define { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x
define <vscale x 8 x i8> @vector_deinterleave_load_factor4_oneactive(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_factor4_oneactive:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlse8.v v8, (a0), a1
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i8>, ptr %p
%d0 = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.vector.deinterleave4(<vscale x 32 x i8> %vec)
@@ -419,8 +420,10 @@ define <vscale x 8 x i8> @vector_deinterleave_load_factor4_oneactive(ptr %p) {
define <vscale x 8 x i8> @vector_deinterleave_load_factor4_oneactive2(ptr %p) {
; CHECK-LABEL: vector_deinterleave_load_factor4_oneactive2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vlseg4e8.v v5, (a0)
+; CHECK-NEXT: addi a0, a0, 3
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlse8.v v8, (a0), a1
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i8>, ptr %p
%d0 = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.vector.deinterleave4(<vscale x 32 x i8> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 5b1746d..ac9f263 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -3712,8 +3712,9 @@ define <vscale x 1 x float> @vector_deinterleave_nxv1f32_nxv8f32_oneactive(<vsca
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg8e32.v v8, (a0)
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a0), a1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
@@ -3732,9 +3733,11 @@ define <vscale x 1 x float> @vector_deinterleave_nxv1f32_nxv8f32_oneactive2(<vsc
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: addi a1, sp, 36
; CHECK-NEXT: vs4r.v v8, (a0)
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vlseg8e32.v v3, (a0)
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vlse32.v v8, (a1), a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 7e7d11e..2e2f12a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -326,3 +326,39 @@ define void @masked_store_factor3_masked(<vscale x 2 x i32> %a, <vscale x 2 x i3
call void @llvm.masked.store(<vscale x 6 x i32> %v, ptr %p, i32 4, <vscale x 6 x i1> %interleaved.mask)
ret void
}
+
+define void @store_factor2_oneactive(<vscale x 2 x i32> %a, ptr %p) {
+; CHECK-LABEL: store_factor2_oneactive:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsseg2e32.v v8, (a0)
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vector.interleave2(<vscale x 2 x i32> %a, <vscale x 2 x i32> poison)
+ store <vscale x 4 x i32> %v, ptr %p
+ ret void
+}
+
+define void @store_factor3_oneactive(<vscale x 2 x i32> %a, ptr %p) {
+; CHECK-LABEL: store_factor3_oneactive:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 12
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %v = call <vscale x 6 x i32> @llvm.vector.interleave3(<vscale x 2 x i32> %a, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison)
+ store <vscale x 6 x i32> %v, ptr %p
+ ret void
+}
+
+define void @store_factor7_oneactive(<vscale x 2 x i32> %a, ptr %p) {
+; CHECK-LABEL: store_factor7_oneactive:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, 24
+; CHECK-NEXT: li a1, 28
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+ %v = call <vscale x 14 x i32> @llvm.vector.interleave7(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> %a)
+ store <vscale x 14 x i32> %v, ptr %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 52cd3e3..bfa4067 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -8,8 +8,10 @@ body: |
; CHECK-LABEL: name: vop_vi
; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vop_vi_incompatible_eew
@@ -18,8 +20,10 @@ body: |
; CHECK-LABEL: name: vop_vi_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vi_incompatible_emul
@@ -28,8 +32,10 @@ body: |
; CHECK-LABEL: name: vop_vi_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vop_vv
@@ -38,8 +44,10 @@ body: |
; CHECK-LABEL: name: vop_vv
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vop_vv_incompatible_eew
@@ -48,9 +56,10 @@ body: |
; CHECK-LABEL: name: vop_vv_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
-
+ $v8 = COPY %y
...
---
name: vop_vv_incompatible_emul
@@ -59,8 +68,10 @@ body: |
; CHECK-LABEL: name: vop_vv_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_vv_vd
@@ -69,8 +80,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vd
; CHECK: early-clobber %x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vwop_vv_vd_incompatible_eew
@@ -79,8 +92,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vd_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_vv_vd_incompatible_emul
@@ -89,8 +104,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vd_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
    %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vwop_vv_vd_passthru_use
@@ -100,9 +117,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_VV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_VV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_vv_vd_passthru_use_incompatible_eew
@@ -112,9 +131,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_VV_MF2 %x, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_VV_MF2 %x, $noreg, $noreg, 1, 4 /* e16 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_vv_vd_passthru_use_incompatible_emul
@@ -124,9 +145,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_VV_MF4 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_VV_MF4 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_vv_vs2
@@ -135,8 +158,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_vv_vs2_incompatible_eew
@@ -145,8 +170,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_vv_vs2_incompatible_emul
@@ -155,8 +182,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_vv_vs1
@@ -165,8 +194,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_vv_vs1_incompatible_eew
@@ -175,8 +206,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs1_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_vv_vs1_incompatible_emul
@@ -185,8 +218,10 @@ body: |
; CHECK-LABEL: name: vwop_vv_vs1_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWADD_VV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_wv_vd
@@ -195,8 +230,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vd
; CHECK: early-clobber %x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vwop_wv_vd_incompatible_eew
@@ -205,8 +242,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vd_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_wv_vd_incompatible_emul
@@ -215,8 +254,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vd_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVWADD_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
    %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vwop_wv_vd_passthru_use
@@ -226,9 +267,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_WV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_WV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_wv_vd_passthru_use_incompatible_eew
@@ -238,9 +281,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_WV_MF2 %x, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_WV_MF2 %x, $noreg, $noreg, 1, 4 /* e16 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_wv_vd_passthru_use_incompatible_emul
@@ -250,9 +295,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_WV_MF4 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_WV_MF4 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwop_wv_vs2
@@ -261,8 +308,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs2
; CHECK: %x:vrm2 = PseudoVADD_VV_M2 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vrm2 = PseudoVADD_VV_M2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrm2 = PseudoVWADD_WV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_wv_vs2_incompatible_eew
@@ -271,8 +320,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs2_incompatible_eew
; CHECK: %x:vrm2 = PseudoVADD_VV_M2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vrm2 = PseudoVADD_VV_M2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_WV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_wv_vs2_incompatible_emul
@@ -281,8 +332,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVWADD_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWADD_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwop_wv_vs1
@@ -291,8 +344,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_wv_vs1_incompatible_eew
@@ -301,8 +356,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs1_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vwop_wv_vs1_incompatible_emul
@@ -311,8 +368,10 @@ body: |
; CHECK-LABEL: name: vwop_wv_vs1_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_WV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: tied_vwop_wv_vs1
@@ -321,8 +380,10 @@ body: |
; CHECK-LABEL: name: tied_vwop_wv_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: tied_vwop_wv_vs1_incompatible_eew
@@ -331,8 +392,10 @@ body: |
; CHECK-LABEL: name: tied_vwop_wv_vs1_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: tied_vwop_wv_vs1_incompatible_emul
@@ -341,8 +404,10 @@ body: |
; CHECK-LABEL: name: tied_vwop_wv_vs1_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0
+ $v8m2 = COPY %y
...
---
name: vop_vf2_vd
@@ -351,8 +416,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vd
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF2_M1 $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF2_M1 $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf2_vd_incompatible_eew
@@ -361,8 +428,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vd_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF2_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF2_M1 $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf2_vd_incompatible_emul
@@ -371,8 +440,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vd_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF2_MF2 $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF2_MF2 $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf2_vs2
@@ -381,8 +452,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vs2
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf2_vs2_incompatible_eew
@@ -391,8 +464,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf2_vs2_incompatible_emul
@@ -401,8 +476,10 @@ body: |
; CHECK-LABEL: name: vop_vf2_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF2_M1 $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vd
@@ -411,8 +488,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vd
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF4_M1 $noreg, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF4_M1 $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vd_incompatible_eew
@@ -421,8 +500,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vd_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF4_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF4_M1 $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vd_incompatible_emul
@@ -431,8 +512,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vd_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF4_MF2 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF4_MF2 $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vs2
@@ -441,8 +524,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vs2
; CHECK: %x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vs2_incompatible_eew
@@ -451,8 +536,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf4_vs2_incompatible_emul
@@ -461,8 +548,10 @@ body: |
; CHECK-LABEL: name: vop_vf4_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF4_M1 $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vd
@@ -471,8 +560,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vd
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, 1, 6 /* e64 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 6 /* e64 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, -1, 6 /* e64 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 6 /* e64 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vd_incompatible_eew
@@ -481,8 +572,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vd_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, -1, 6 /* e64 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, -1, 6 /* e64 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vd_incompatible_emul
@@ -491,8 +584,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vd_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, -1, 6 /* e64 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 6 /* e64 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVZEXT_VF8_M1 $noreg, $noreg, -1, 6 /* e64 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 6 /* e64 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vs2
@@ -501,8 +596,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vs2
; CHECK: %x:vr = PseudoVADD_VV_MF8 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF8 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vs2_incompatible_eew
@@ -511,8 +608,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_MF8 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF8 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0
+ $v8 = COPY %y
...
---
name: vop_vf8_vs2_incompatible_emul
@@ -521,8 +620,10 @@ body: |
; CHECK-LABEL: name: vop_vf8_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVZEXT_VF8_M1 $noreg, %x, 1, 6 /* e64 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vd
@@ -531,8 +632,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vd
; CHECK: early-clobber %x:vr = PseudoVNSRL_WV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVNSRL_WV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vd_unsupported_eew
@@ -541,8 +644,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vd_unsupported_eew
; CHECK: early-clobber %x:vr = PseudoVNSRL_WV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVNSRL_WV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vd_unsupported_emul
@@ -551,8 +656,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vd_unsupported_emul
; CHECK: %x:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vd_passthru_use
@@ -562,9 +669,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVNSRL_WV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %z
...
---
name: vnop_wv_vd_passthru_use_incompatible_eew
@@ -574,9 +683,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVNSRL_WV_M1 %x, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_M1 %x, $noreg, $noreg, 1, 4 /* e16 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vnop_wv_vd_passthru_use_unsupported_emul
@@ -586,9 +697,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %z
...
---
name: vnop_wv_vs2
@@ -597,8 +710,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vs2_incompatible_eew
@@ -607,8 +722,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vs2_incompatible_emul
@@ -617,8 +734,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vs1
@@ -627,8 +746,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs1
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vs1_incompatible_eew
@@ -637,8 +758,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs1_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vnop_wv_vs1_incompatible_emul
@@ -647,8 +770,10 @@ body: |
; CHECK-LABEL: name: vnop_wv_vs1_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vfnop_vs2
@@ -657,8 +782,10 @@ body: |
; CHECK-LABEL: name: vfnop_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vfnop_vs2_incompatible_eew
@@ -667,8 +794,10 @@ body: |
; CHECK-LABEL: name: vfnop_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vfnop_vs2_incompatible_emul
@@ -677,8 +806,10 @@ body: |
; CHECK-LABEL: name: vfnop_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
early-clobber %y:vr = PseudoVFNCVT_X_F_W_MF2 $noreg, %x, 0, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vseN_v
@@ -737,8 +868,10 @@ body: |
; CHECK-LABEL: name: vleN_v
; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vleN_v_incompatible_eew
@@ -747,8 +880,10 @@ body: |
; CHECK-LABEL: name: vleN_v_incompatible_eew
; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vleN_v_incompatible_emul
@@ -757,8 +892,10 @@ body: |
; CHECK-LABEL: name: vleN_v_incompatible_emul
; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vlm_v
@@ -767,8 +904,10 @@ body: |
; CHECK-LABEL: name: vlm_v
; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, 1, 0 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
%y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vlm_v_incompatible_eew
@@ -777,8 +916,10 @@ body: |
; CHECK-LABEL: name: vlm_v_incompatible_eew
; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
%y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vlm_v_incompatible_emul
@@ -787,8 +928,10 @@ body: |
; CHECK-LABEL: name: vlm_v_incompatible_emul
; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
%y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vsseN_v
@@ -887,8 +1030,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_data
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_incompatible_eew
@@ -897,8 +1042,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_data_incompatible_emul
@@ -907,8 +1054,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_data_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_idx
@@ -917,8 +1066,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_idx
; CHECK: %x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVLUXEI8_V_MF2_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVLUXEI8_V_MF2_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_idx_incompatible_eew
@@ -927,8 +1078,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_idx_incompatible_emul
@@ -937,8 +1090,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_vd
@@ -947,8 +1102,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_vd
; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_v_vd_incompatible_eew
@@ -957,8 +1114,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_v_vd_incompatible_eew
; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vluxeiN_vd_incompatible_emul
@@ -967,8 +1126,10 @@ body: |
; CHECK-LABEL: name: vluxeiN_vd_incompatible_emul
; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_mm
@@ -977,8 +1138,10 @@ body: |
; CHECK-LABEL: name: vmop_mm
; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmop_mm_incompatible_eew
@@ -987,8 +1150,10 @@ body: |
; CHECK-LABEL: name: vmop_mm_incompatible_eew
; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_mm_incompatible_emul
@@ -997,8 +1162,10 @@ body: |
; CHECK-LABEL: name: vmop_mm_incompatible_emul
; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmop_mm_mask
@@ -1007,8 +1174,10 @@ body: |
; CHECK-LABEL: name: vmop_mm_mask
; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_mm_mask_larger_emul_user
@@ -1017,8 +1186,10 @@ body: |
; CHECK-LABEL: name: vmop_mm_mask_larger_emul_user
; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8m2 = COPY %y
...
---
name: vmop_mm_mask_incompatible_emul
@@ -1027,8 +1198,10 @@ body: |
; CHECK-LABEL: name: vmop_mm_mask_incompatible_emul
; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
%y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_vv
@@ -1037,8 +1210,10 @@ body: |
; CHECK-LABEL: name: vmop_vv
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmop_vv_maskuser
@@ -1047,8 +1222,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_maskuser
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_vv_maskuser_incompatible_eew
@@ -1057,8 +1234,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_maskuser_incompatible_eew
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vmop_vv_incompatible_emul
@@ -1067,8 +1246,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_incompatible_emul
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmop_vv_maskuser_incompatible_emul
@@ -1077,8 +1258,10 @@ body: |
    ; CHECK-LABEL: name: vmop_vv_maskuser_incompatible_emul
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmop_vv_maskuser_larger_emul
@@ -1087,8 +1270,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_maskuser_larger_emul
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+ $v8m2 = COPY %y
...
---
name: vmop_vv_consumer_incompatible_eew
@@ -1097,8 +1282,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_consumer_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 4 /* e16 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 4 /* e16 */
+ $v8 = COPY %y
...
---
name: vmop_vv_consumer_incompatible_emul
@@ -1107,8 +1294,10 @@ body: |
; CHECK-LABEL: name: vmop_vv_consumer_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmop_vv_passthru_use
@@ -1118,9 +1307,11 @@ body: |
; CHECK: %x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 /* ta, mu */
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e1 */
%y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
+ $v8 = COPY %z
...
---
name: vmop_vv_passthru_use_incompatible_eew
@@ -1130,9 +1321,11 @@ body: |
; CHECK: %x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 /* ta, mu */
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
+ $v8 = COPY %z
...
---
name: vmop_vv_passthru_use_incompatible_emul
@@ -1142,9 +1335,11 @@ body: |
; CHECK: %x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1 /* ta, mu */
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e1 */
%y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
+ $v8 = COPY %z
...
---
name: vmerge_vim
@@ -1153,8 +1348,10 @@ body: |
; CHECK-LABEL: name: vmerge_vim
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vim_incompatible_eew
@@ -1163,8 +1360,10 @@ body: |
; CHECK-LABEL: name: vmerge_vim_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vim_incompatible_emul
@@ -1173,8 +1372,10 @@ body: |
; CHECK-LABEL: name: vmerge_vim_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vxm
@@ -1183,8 +1384,10 @@ body: |
; CHECK-LABEL: name: vmerge_vxm
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vxm_incompatible_eew
@@ -1193,8 +1396,10 @@ body: |
; CHECK-LABEL: name: vmerge_vxm_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vxm_incompatible_emul
@@ -1203,8 +1408,10 @@ body: |
; CHECK-LABEL: name: vmerge_vxm_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vvm
@@ -1213,8 +1420,10 @@ body: |
; CHECK-LABEL: name: vmerge_vvm
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vvm_incompatible_eew
@@ -1223,8 +1432,10 @@ body: |
; CHECK-LABEL: name: vmerge_vvm_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmerge_vvm_incompatible_emul
@@ -1233,8 +1444,10 @@ body: |
; CHECK-LABEL: name: vmerge_vvm_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ $v8 = COPY %y
...
---
name: vmv_v_i
@@ -1243,8 +1456,10 @@ body: |
; CHECK-LABEL: name: vmv_v_i
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_i_incompatible_eew
@@ -1253,8 +1468,10 @@ body: |
; CHECK-LABEL: name: vmv_v_i_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_i_incompatible_emul
@@ -1263,8 +1480,10 @@ body: |
; CHECK-LABEL: name: vmv_v_i_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_x
@@ -1273,8 +1492,10 @@ body: |
; CHECK-LABEL: name: vmv_v_x
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_x_incompatible_eew
@@ -1283,8 +1504,10 @@ body: |
; CHECK-LABEL: name: vmv_v_x_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_x_incompatible_emul
@@ -1293,8 +1516,10 @@ body: |
; CHECK-LABEL: name: vmv_v_x_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_X_MF2 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_X_MF2 %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_v
@@ -1303,8 +1528,10 @@ body: |
; CHECK-LABEL: name: vmv_v_v
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_v_incompatible_eew
@@ -1313,8 +1540,10 @@ body: |
; CHECK-LABEL: name: vmv_v_v_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmv_v_v_incompatible_emul
@@ -1323,8 +1552,10 @@ body: |
; CHECK-LABEL: name: vmv_v_v_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMV_V_V_MF2 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVMV_V_V_MF2 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_dest
@@ -1333,8 +1564,10 @@ body: |
; CHECK-LABEL: name: viota_m_dest
; CHECK: early-clobber %x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_dest_incompatible_eew
@@ -1343,8 +1576,10 @@ body: |
; CHECK-LABEL: name: viota_m_dest_incompatible_eew
; CHECK: early-clobber %x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_dest_incompatible_emul
@@ -1353,8 +1588,10 @@ body: |
; CHECK-LABEL: name: viota_m_dest_incompatible_emul
; CHECK: early-clobber %x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVIOTA_M_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_dest_passthru_use
@@ -1364,9 +1601,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_M1 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVIOTA_M_M1 %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %z
...
---
name: viota_m_dest_passthru_use_incompatible_eew
@@ -1376,9 +1615,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_M1 %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVIOTA_M_M1 %x, $noreg, 1, 4 /* e16 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: viota_m_dest_passthru_use_incompatible_emul
@@ -1388,9 +1629,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_MF2 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVIOTA_M_MF2 %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %z
...
---
name: viota_m_mask
@@ -1399,8 +1642,10 @@ body: |
; CHECK-LABEL: name: viota_m_mask
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_mask_scale_mask
@@ -1409,8 +1654,10 @@ body: |
; CHECK-LABEL: name: viota_m_mask_scale_mask
; CHECK: early-clobber %x:vr = PseudoVMSEQ_VV_M2 $noreg, $noreg, 1, 4 /* e16 */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSEQ_VV_M2 $noreg, $noreg, -1, 4 /* e16 */
%y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_mask_incompatible_emul_from_sew
@@ -1419,8 +1666,10 @@ body: |
; CHECK-LABEL: name: viota_m_mask_incompatible_emul_from_sew
; CHECK: %x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_M1 $noreg, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: viota_m_mask_incompatible_emul_from_lmul
@@ -1429,8 +1678,10 @@ body: |
; CHECK-LABEL: name: viota_m_mask_incompatible_emul_from_lmul
; CHECK: %x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: early-clobber %y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vred_vs2
@@ -1439,8 +1690,10 @@ body: |
; CHECK-LABEL: name: vred_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vred_vs1
@@ -1449,8 +1702,10 @@ body: |
; CHECK-LABEL: name: vred_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vred_vs1_vs2
@@ -1459,8 +1714,10 @@ body: |
; CHECK-LABEL: name: vred_vs1_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vred_vs1_vs2_incompatible_eew
@@ -1469,8 +1726,10 @@ body: |
; CHECK-LABEL: name: vred_vs1_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vred_vs1_vs2_incompatible_emul
@@ -1479,8 +1738,10 @@ body: |
; CHECK-LABEL: name: vred_vs1_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vred_other_user_is_vl0
@@ -1490,9 +1751,13 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vred_both_vl0
@@ -1502,9 +1767,13 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vred_vl0_and_vlreg
@@ -1515,10 +1784,14 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, %vl, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, %vl, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vred_vlreg_and_vl0
@@ -1529,10 +1802,14 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vred_other_user_is_vl2
@@ -1542,9 +1819,13 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vwred_vs2
@@ -1553,8 +1834,10 @@ body: |
; CHECK-LABEL: name: vwred_vs2
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwred_vs1
@@ -1563,8 +1846,10 @@ body: |
; CHECK-LABEL: name: vwred_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwred_vs1_incompatible_eew
@@ -1573,8 +1858,10 @@ body: |
; CHECK-LABEL: name: vwred_vs1_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwred_vs2_incompatible_eew
@@ -1583,8 +1870,10 @@ body: |
; CHECK-LABEL: name: vwred_vs2_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vwred_incompatible_emul
@@ -1593,8 +1882,10 @@ body: |
; CHECK-LABEL: name: vwred_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_MF2_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWREDSUM_VS_MF2_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vfred_vs2
@@ -1603,8 +1894,10 @@ body: |
; CHECK-LABEL: name: vfred_vs2
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, 1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 5 /* e32 */, 0
%y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vfred_vs1
@@ -1613,8 +1906,10 @@ body: |
; CHECK-LABEL: name: vfred_vs1
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, 1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 5 /* e32 */, 0
%y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vfred_vs1_vs2
@@ -1623,8 +1918,10 @@ body: |
; CHECK-LABEL: name: vfred_vs1_vs2
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, 1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 5 /* e32 */, 0
%y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vfred_vs1_vs2_incompatible_eew
@@ -1633,8 +1930,10 @@ body: |
; CHECK-LABEL: name: vfred_vs1_vs2_incompatible_eew
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 6 /* e64 */, 0
%y:vr = PseudoVFREDMAX_VS_M1_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vfred_vs1_vs2_incompatible_emul
@@ -1643,8 +1942,10 @@ body: |
; CHECK-LABEL: name: vfred_vs1_vs2_incompatible_emul
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVFREDMAX_VS_MF2_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 5 /* e32 */, 0
%y:vr = PseudoVFREDMAX_VS_MF2_E32 $noreg, %x, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vwred_passthru_use
@@ -1654,9 +1955,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_MF2_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWREDSUM_VS_MF2_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwred_passthru_use_incompatible_eew
@@ -1666,9 +1969,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_MF2_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVWREDSUM_VS_MF2_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vwred_passthru_use_incompatible_emul
@@ -1678,9 +1983,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVWREDSUM_VS_MF4_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVWREDSUM_VS_MF4_E8 %x, $noreg, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_MF2 $noreg, %y, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %z
...
---
name: vfirst_v
@@ -1749,8 +2056,10 @@ body: |
; CHECK-LABEL: name: vmclr_m
; CHECK: %x:vr = PseudoVMCLR_M_B8 1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMCLR_M_B8 -1, 0
%y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmclr_m_incompatible_eew
@@ -1759,8 +2068,10 @@ body: |
; CHECK-LABEL: name: vmclr_m_incompatible_eew
; CHECK: %x:vr = PseudoVMCLR_M_B8 -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMCLR_M_B8 -1, 0
%y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmclr_m_incompatible_emul
@@ -1769,8 +2080,10 @@ body: |
; CHECK-LABEL: name: vmclr_m_incompatible_emul
; CHECK: %x:vr = PseudoVMCLR_M_B8 -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMCLR_M_B8 -1, 0
%y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmset_m
@@ -1779,8 +2092,10 @@ body: |
; CHECK-LABEL: name: vmset_m
; CHECK: %x:vr = PseudoVMSET_M_B8 1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSET_M_B8 -1, 0
%y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vmset_m_incompatible_eew
@@ -1789,8 +2104,10 @@ body: |
; CHECK-LABEL: name: vmset_m_incompatible_eew
; CHECK: %x:vr = PseudoVMSET_M_B8 -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSET_M_B8 -1, 0
%y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vmset_m_incompatible_emul
@@ -1799,8 +2116,10 @@ body: |
; CHECK-LABEL: name: vmset_m_incompatible_emul
; CHECK: %x:vr = PseudoVMSET_M_B8 -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVMSET_M_B8 -1, 0
%y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv
@@ -1811,6 +2130,7 @@ body: |
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv_incompatible_data_eew
@@ -1821,6 +2141,7 @@ body: |
; CHECK-NEXT: early-clobber %y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv_incompatible_index_eew
@@ -1831,6 +2152,7 @@ body: |
; CHECK-NEXT: early-clobber %y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv_incompatible_dest_emul
@@ -1841,6 +2163,7 @@ body: |
; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv_incompatible_source_emul
@@ -1851,6 +2174,7 @@ body: |
; CHECK-NEXT: early-clobber %y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_MF2 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
%y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
...
---
name: vrgatherei16_vv_incompatible_index_emul
@@ -1861,3 +2185,4 @@ body: |
; CHECK-NEXT: early-clobber %y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, %x, 1, 5 /* e32 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
%y:vr = PseudoVRGATHEREI16_VV_M1_E32_MF2 $noreg, $noreg, %x, 1, 5 /* e32 */, 0
+ $v8 = COPY %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 9883351..60398cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -12,9 +12,11 @@ body: |
; CHECK-NEXT: %vl:gprnox0 = COPY $x1
; CHECK-NEXT: %x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVNSRL_WV_MF4 $noreg, %x, $noreg, %vl, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
%y:vr = PseudoVNSRL_WV_MF4 $noreg, %x, $noreg, %vl, 4 /* e16 */, 0 /* tu, mu */
+ $v8 = COPY %y
...
---
name: vredsum_vv_user
@@ -28,10 +30,14 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E64 $noreg, %x, $noreg, -1, 6 /* e64 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
%y:vr = PseudoVREDSUM_VS_M1_E64 $noreg, %x, $noreg, -1, 6 /* e64 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0 /* tu, mu */
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: use_largest_common_vl_imm_imm
@@ -41,9 +47,13 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: use_largest_common_vl_same_reg
@@ -57,10 +67,14 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: use_largest_common_vl_diff_regs
@@ -75,11 +89,15 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl0, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl0:gprnox0 = COPY $x1
%vl1:gprnox0 = COPY $x2
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl0, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl1, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: use_largest_common_vl_imm_reg
@@ -93,10 +111,14 @@ body: |
; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%vl:gprnox0 = COPY $x1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: use_largest_common_vl_imm_vlmax
@@ -106,9 +128,13 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: $v9 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 3 /* e8 */, 0
+ $v8 = COPY %y
+ $v9 = COPY %z
...
---
name: vfcvt_x_f_v_nofpexcept
@@ -117,8 +143,10 @@ body: |
; CHECK-LABEL: name: vfcvt_x_f_v_nofpexcept
; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
    %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vfcvt_x_f_v_fpexcept
@@ -127,8 +155,10 @@ body: |
; CHECK-LABEL: name: vfcvt_x_f_v_fpexcept
; CHECK: %x:vr = PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
    %x:vr = PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e8 */, 0
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+ $v8 = COPY %y
...
---
name: vfncvtbf16_f_f_w_nofpexcept
@@ -137,8 +167,10 @@ body: |
; CHECK-LABEL: name: vfncvtbf16_f_f_w_nofpexcept
; CHECK: early-clobber %x:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, $noreg, 7, 1, 4 /* e16 */, 0 /* tu, mu */, implicit $frm
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, $noreg, 7, -1, 4 /* e16 */, 0 /* tu, mu */, implicit $frm
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+ $v8 = COPY %y
...
---
name: vfsqrt_nofpexcept
@@ -147,8 +179,10 @@ body: |
; CHECK-LABEL: name: vfsqrt_nofpexcept
; CHECK: %x:vrm2 = nofpexcept PseudoVFSQRT_V_M2_E32 $noreg, $noreg, 7, 6, 5 /* e32 */, 3 /* ta, ma */, implicit $frm
; CHECK-NEXT: early-clobber %y:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, %x, 7, 6, 4 /* e16 */, 3 /* ta, ma */, implicit $frm
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vrm2 = nofpexcept PseudoVFSQRT_V_M2_E32 $noreg, $noreg, 7, 8, 5, 3, implicit $frm
early-clobber %y:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, %x, 7, 6, 4, 3, implicit $frm
+ $v8 = COPY %y
...
---
name: vfsqrt_fpexcept
@@ -157,8 +191,10 @@ body: |
; CHECK-LABEL: name: vfsqrt_fpexcept
; CHECK: %x:vrm2 = PseudoVFSQRT_V_M2_E32 $noreg, $noreg, 7, 8, 5 /* e32 */, 3 /* ta, ma */, implicit $frm
; CHECK-NEXT: early-clobber %y:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, %x, 7, 6, 4 /* e16 */, 3 /* ta, ma */, implicit $frm
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vrm2 = PseudoVFSQRT_V_M2_E32 $noreg, $noreg, 7, 8, 5, 3, implicit $frm
early-clobber %y:vr = nofpexcept PseudoVFNCVTBF16_F_F_W_M1_E16 $noreg, %x, 7, 6, 4, 3, implicit $frm
+ $v8 = COPY %y
...
---
name: vfrsqrt7_nofpexcept
@@ -167,8 +203,10 @@ body: |
; CHECK-LABEL: name: vfrsqrt7_nofpexcept
; CHECK: %x:vrm2 = nofpexcept PseudoVFRSQRT7_V_M2_E32 $noreg, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrm2 = PseudoVADD_VV_M2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vrm2 = nofpexcept PseudoVFRSQRT7_V_M2_E32 $noreg, $noreg, 7, 5, 0
%y:vrm2 = PseudoVADD_VV_M2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8m2 = COPY %y
...
---
name: vfrsqrt7_fpexcept
@@ -177,8 +215,10 @@ body: |
; CHECK-LABEL: name: vfrsqrt7_fpexcept
; CHECK: %x:vrm2 = PseudoVFRSQRT7_V_M2_E32 $noreg, $noreg, 7, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vrm2 = PseudoVADD_VV_M2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vrm2 = PseudoVFRSQRT7_V_M2_E32 $noreg, $noreg, 7, 5, 0
%y:vrm2 = PseudoVADD_VV_M2 $noreg, %x, $noreg, 1, 5 /* e32 */, 0
+ $v8m2 = COPY %y
...
---
name: vwadd_tied_vs1
@@ -187,8 +227,10 @@ body: |
; CHECK-LABEL: name: vwadd_tied_vs1
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: early-clobber %y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8m2 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vrm2 = PseudoVWADD_WV_M1_TIED $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8m2 = COPY %y
...
---
name: crossbb
@@ -202,11 +244,13 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: %a1:vr = PseudoVADD_VV_M1 $noreg, %c, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %a2:vr = PseudoVADD_VV_M1 $noreg, %a1, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %a2
; CHECK-NEXT: PseudoRET
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: %b1:vr = PseudoVADD_VV_M1 $noreg, %c, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %b2:vr = PseudoVADD_VV_M1 $noreg, %b1, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %b2
; CHECK-NEXT: PseudoRET
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
@@ -221,10 +265,12 @@ body: |
bb.1:
%a1:vr = PseudoVADD_VV_M1 $noreg, %c, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%a2:vr = PseudoVADD_VV_M1 $noreg, %a1, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %a2
PseudoRET
bb.2:
%b1:vr = PseudoVADD_VV_M1 $noreg, %c, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%b2:vr = PseudoVADD_VV_M1 $noreg, %b1, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %b2
PseudoRET
bb.3:
liveins: $x1
@@ -237,17 +283,21 @@ name: unreachable
body: |
; CHECK-LABEL: name: unreachable
; CHECK: bb.0:
- ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %x
; CHECK-NEXT: PseudoRET
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
; CHECK-NEXT: PseudoRET
bb.0:
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %x
PseudoRET
bb.1:
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %y
PseudoRET
...
---
@@ -259,9 +309,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
# Can't reduce %x because %y uses it as a passthru, and %y's inactive elements are demanded by %z
@@ -272,9 +324,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
# Can reduce %x even though %y uses it as a passthru, because %y's inactive elements aren't demanded
@@ -287,11 +341,13 @@ body: |
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 %y, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %a:vr = PseudoVADD_VV_M1 %z, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %b:vr = PseudoVADD_VV_M1 $noreg, %a, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %b
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 %y, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%a:vr = PseudoVADD_VV_M1 %z, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%b:vr = PseudoVADD_VV_M1 $noreg, %a, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %b
...
---
# Can't reduce %x because %y uses it as a passthru, and %y's inactive elements are ultimately demanded in %b
@@ -304,11 +360,13 @@ body: |
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 %y, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %a:vr = PseudoVADD_VV_M1 %z, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %b:vr = PseudoVADD_VV_M1 $noreg, %a, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %b
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = PseudoVADD_VV_M1 %x, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 %y, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%a:vr = PseudoVADD_VV_M1 %z, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%b:vr = PseudoVADD_VV_M1 $noreg, %a, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %b
...
---
name: vxsat_dead
@@ -317,8 +375,10 @@ body: |
; CHECK-LABEL: name: vxsat_dead
; CHECK: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vxsat
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vxsat
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %y
...
---
name: vxsat_not_dead
@@ -327,8 +387,10 @@ body: |
; CHECK-LABEL: name: vxsat_not_dead
; CHECK: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */, implicit-def $vxsat
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */, implicit-def $vxsat
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %y
...
---
name: copy
@@ -338,9 +400,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = COPY %x
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = COPY %x
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
name: copy_multiple_users
@@ -351,10 +415,14 @@ body: |
; CHECK-NEXT: %y:vr = COPY %x
; CHECK-NEXT: %z0:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z1:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 3, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z0
+ ; CHECK-NEXT: $v9 = COPY %z1
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = COPY %x
%z0:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
%z1:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 3, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z0
+ $v9 = COPY %z1
...
---
name: copy_user_invalid_sew
@@ -364,9 +432,11 @@ body: |
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = COPY %x
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
%y:vr = COPY %x
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
name: phi
@@ -387,6 +457,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: %y:vr = PHI %w, %bb.0, %x, %bb.1
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
bb.0:
%w:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
BNE $noreg, $noreg, %bb.2
@@ -395,6 +466,7 @@ body: |
bb.2:
%y:vr = PHI %w, %bb.0, %x, %bb.1
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
name: phi_user_invalid_sew
@@ -415,6 +487,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: %y:vr = PHI %w, %bb.0, %x, %bb.1
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
bb.0:
%w:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
BNE $noreg, $noreg, %bb.2
@@ -423,6 +496,7 @@ body: |
bb.2:
%y:vr = PHI %w, %bb.0, %x, %bb.1
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
name: phi_different_incoming_sew
@@ -443,6 +517,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: %y:vr = PHI %w, %bb.0, %x, %bb.1
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
bb.0:
%w:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
BNE $noreg, $noreg, %bb.2
@@ -451,6 +526,7 @@ body: |
bb.2:
%y:vr = PHI %w, %bb.0, %x, %bb.1
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
...
---
name: phi_cycle_direct
@@ -467,12 +543,14 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %y:vr = PHI %x, %bb.0, %y, %bb.1
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
; CHECK-NEXT: PseudoBR %bb.1
bb.0:
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
bb.1:
%y:vr = PHI %x, %bb.0, %y, %bb.1
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
PseudoBR %bb.1
...
---
@@ -490,12 +568,14 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %y:vr = PHI %x, %bb.0, %z, %bb.1
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
; CHECK-NEXT: PseudoBR %bb.1
bb.0:
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
bb.1:
%y:vr = PHI %x, %bb.0, %z, %bb.1
%z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ $v8 = COPY %z
PseudoBR %bb.1
...
---
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 1e2e779..2f2035b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -222,3 +222,14 @@ define <vscale x 1 x i64> @vleff_move_past_passthru(ptr %p, ptr %q, iXLen %avl)
%b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %vec, iXLen %avl)
ret <vscale x 1 x i64> %b
}
+
+define <vscale x 1 x i64> @vmerge(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl) {
+; CHECK-LABEL: vmerge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %a, iXLen %avl)
+ ret <vscale x 1 x i64> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
index 6e106e5..9c3e96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
@@ -152,3 +152,19 @@ body: |
%y:gpr = ADDI $x0, 1
%z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
...
+---
+name: vmerge_vvm
+body: |
+ bb.0:
+ liveins: $v8, $v0
+ ; CHECK-LABEL: name: vmerge_vvm
+ ; CHECK: liveins: $v8, $v0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %passthru:vrnov0 = COPY $v8
+ ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+ ; CHECK-NEXT: %x:vrnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %passthru:vr = COPY $v8
+ %mask:vmv0 = COPY $v0
+ %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
index 23c0c82..2afb72f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
@@ -674,16 +674,20 @@ define <vscale x 2 x i32> @load_factor2_oneactive(ptr %ptr, i32 %evl) {
define <vscale x 2 x i32> @load_factor5_oneactive(ptr %ptr, i32 %evl) {
; RV32-LABEL: load_factor5_oneactive:
; RV32: # %bb.0:
+; RV32-NEXT: addi a0, a0, 12
+; RV32-NEXT: li a2, 20
; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; RV32-NEXT: vlseg5e32.v v5, (a0)
+; RV32-NEXT: vlse32.v v8, (a0), a2
; RV32-NEXT: ret
;
; RV64-LABEL: load_factor5_oneactive:
; RV64: # %bb.0:
; RV64-NEXT: slli a1, a1, 32
+; RV64-NEXT: addi a0, a0, 12
; RV64-NEXT: srli a1, a1, 32
+; RV64-NEXT: li a2, 20
; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; RV64-NEXT: vlseg5e32.v v5, (a0)
+; RV64-NEXT: vlse32.v v8, (a0), a2
; RV64-NEXT: ret
%rvl = mul nuw i32 %evl, 5
%wide.masked.load = call <vscale x 10 x i32> @llvm.vp.load(ptr %ptr, <vscale x 10 x i1> splat (i1 true), i32 %rvl)
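The updated checks above lower a factor-5 segment load with a single live member to a plain strided load: the base pointer is advanced by 12 bytes (element index 3 of the 5-element group, given e32 elements) and the stride is 5 * 4 = 20 bytes. A minimal C sketch of the access pattern being selected, assuming i32 elements; the function and parameter names are illustrative only:

#include <stddef.h>

/* Illustrative only: one live member (index 3) of a factor-5 interleaved
 * group of i32, matching the addi a0,a0,12 / li a2,20 sequence above. */
void load_one_of_five(const int *ptr, int *out, size_t n) {
  for (size_t i = 0; i < n; ++i)
    out[i] = ptr[5 * i + 3]; /* base + 12 bytes, 20-byte stride */
}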
diff --git a/llvm/test/CodeGen/RISCV/xmips-cbop.ll b/llvm/test/CodeGen/RISCV/xmips-cbop.ll
index cbbd1de..0d5defc 100644
--- a/llvm/test/CodeGen/RISCV/xmips-cbop.ll
+++ b/llvm/test/CodeGen/RISCV/xmips-cbop.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+xmipscbop -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32XMIPSPREFETCH
-; RUN: llc -mtriple=riscv64 -mattr=+xmipscbop -mattr=+m -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+xmipscbop -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64XMIPSPREFETCH
define void @prefetch_data_read(ptr noundef %ptr) nounwind {
@@ -49,3 +49,54 @@ define void @prefetch_inst_read(ptr noundef %ptr) nounwind {
tail call void @llvm.prefetch.p0(ptr nonnull %arrayidx, i32 0, i32 0, i32 0)
ret void
}
+
+define void @prefetch_frameindex_test_neg() nounwind {
+; RV32XMIPSPREFETCH-LABEL: prefetch_frameindex_test_neg:
+; RV32XMIPSPREFETCH: # %bb.0:
+; RV32XMIPSPREFETCH-NEXT: lui a0, 1
+; RV32XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV32XMIPSPREFETCH-NEXT: sub sp, sp, a0
+; RV32XMIPSPREFETCH-NEXT: addi a0, sp, 524
+; RV32XMIPSPREFETCH-NEXT: mips.pref 8, 0(a0)
+; RV32XMIPSPREFETCH-NEXT: lui a0, 1
+; RV32XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV32XMIPSPREFETCH-NEXT: add sp, sp, a0
+; RV32XMIPSPREFETCH-NEXT: ret
+;
+; RV64XMIPSPREFETCH-LABEL: prefetch_frameindex_test_neg:
+; RV64XMIPSPREFETCH: # %bb.0:
+; RV64XMIPSPREFETCH-NEXT: lui a0, 1
+; RV64XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV64XMIPSPREFETCH-NEXT: sub sp, sp, a0
+; RV64XMIPSPREFETCH-NEXT: addi a0, sp, 524
+; RV64XMIPSPREFETCH-NEXT: mips.pref 8, 0(a0)
+; RV64XMIPSPREFETCH-NEXT: lui a0, 1
+; RV64XMIPSPREFETCH-NEXT: addi a0, a0, 16
+; RV64XMIPSPREFETCH-NEXT: add sp, sp, a0
+; RV64XMIPSPREFETCH-NEXT: ret
+ %data = alloca [1024 x i32], align 4
+ %ptr = getelementptr [1024 x i32], ptr %data, i32 0, i32 127
+ call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
+ ret void
+}
+
+define void @prefetch_frameindex_test() nounwind {
+; RV32XMIPSPREFETCH-LABEL: prefetch_frameindex_test:
+; RV32XMIPSPREFETCH: # %bb.0:
+; RV32XMIPSPREFETCH-NEXT: addi sp, sp, -512
+; RV32XMIPSPREFETCH-NEXT: mips.pref 8, 32(sp)
+; RV32XMIPSPREFETCH-NEXT: addi sp, sp, 512
+; RV32XMIPSPREFETCH-NEXT: ret
+;
+; RV64XMIPSPREFETCH-LABEL: prefetch_frameindex_test:
+; RV64XMIPSPREFETCH: # %bb.0:
+; RV64XMIPSPREFETCH-NEXT: addi sp, sp, -512
+; RV64XMIPSPREFETCH-NEXT: mips.pref 8, 32(sp)
+; RV64XMIPSPREFETCH-NEXT: addi sp, sp, 512
+; RV64XMIPSPREFETCH-NEXT: ret
+ %data = alloca [128 x i32], align 4
+ %base = bitcast ptr %data to ptr
+ %ptr = getelementptr [128 x i32], ptr %base, i32 0, i32 8
+ call void @llvm.prefetch(ptr %ptr, i32 0, i32 0, i32 1)
+ ret void
+}
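The new frame-index tests call @llvm.prefetch with rw = 0 (read), locality = 0, and cache type 1 (data), the same form Clang emits for __builtin_prefetch. A minimal C sketch under that assumption; the function name and buffer are illustrative and not taken from the test:

/* Illustrative only: __builtin_prefetch(addr, rw, locality) lowers to
 * @llvm.prefetch with cache type 1 (data), as used in the tests above. */
void prefetch_read(const int *buf) {
  __builtin_prefetch(&buf[8], /*rw=*/0, /*locality=*/0);
}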
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
new file mode 100644
index 0000000..b2333e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
@@ -0,0 +1,42 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+@.str = private unnamed_addr constant [4 x i8] c"In3\00", align 1
+@.str.2 = private unnamed_addr constant [5 x i8] c"Out4\00", align 1
+@.str.3 = private unnamed_addr constant [5 x i8] c"Out3\00", align 1
+
+
+; CHECK-DAG: %[[#INT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#INT4:]] = OpTypeVector %[[#INT32]] 4
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FLOAT4:]] = OpTypeVector %[[#FLOAT]] 4
+; CHECK-DAG: %[[#INT3:]] = OpTypeVector %[[#INT32]] 3
+; CHECK-DAG: %[[#UNDEF_INT4:]] = OpUndef %[[#INT4]]
+
+define void @case1() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4i32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) %2, i32 0)
+ store <4 x i32> %4, ptr addrspace(11) %5, align 16
+ ret void
+}
+
+define void @case2() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ ; CHECK: %[[#VEC_TRUNCATE:]] = OpVectorShuffle %[[#INT3]] %[[#VEC_SHUFFLE]] %[[#UNDEF_INT4]] 0 1 2
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.3)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = shufflevector <4 x i32> %4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+ %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3i32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) %2, i32 0)
+ store <3 x i32> %5, ptr addrspace(11) %6, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll
new file mode 100644
index 0000000..ec8330c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/is_fpclass.ll
@@ -0,0 +1,408 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool
+; CHECK-DAG: %[[#FP32Ty:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FP64Ty:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#FP16Ty:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#I32Ty:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#I64Ty:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#I16Ty:]] = OpTypeInt 16 0
+
+; CHECK-DAG: %[[#V4I32Ty:]] = OpTypeVector %[[#I32Ty]] 4
+; CHECK-DAG: %[[#V4FP32Ty:]] = OpTypeVector %[[#FP32Ty]] 4
+; CHECK-DAG: %[[#V4BoolTy:]] = OpTypeVector %[[#BoolTy]] 4
+
+; CHECK-DAG: %[[#MaxExpMinus1:]] = OpConstant %[[#I32Ty]] 2130706432
+; CHECK-DAG: %[[#ExpLSB:]] = OpConstant %[[#I32Ty]] 8388608
+; CHECK-DAG: %[[#True:]] = OpConstantTrue %[[#BoolTy]]
+; CHECK-DAG: %[[#False:]] = OpConstantFalse %[[#BoolTy]]
+; CHECK-DAG: %[[#ValueMask:]] = OpConstant %[[#I32Ty]] 2147483647
+; CHECK-DAG: %[[#InfWithQnanBit:]] = OpConstant %[[#I32Ty]] 2143289344
+; CHECK-DAG: %[[#Inf:]] = OpConstant %[[#I32Ty]] 2139095040
+; CHECK-DAG: %[[#NegInf:]] = OpConstant %[[#I32Ty]] 4286578688
+; CHECK-DAG: %[[#One:]] = OpConstant %[[#I32Ty]] 1
+; CHECK-DAG: %[[#Zero:]] = OpConstantNull %[[#I32Ty]]
+; CHECK-DAG: %[[#AllOneMantissa:]] = OpConstant %[[#I32Ty]] 8388607
+; CHECK-DAG: %[[#SignBit:]] = OpConstant %[[#I32Ty]] 2147483648
+
+; CHECK-DAG: %[[#ValueMaskFP64:]] = OpConstant %[[#I64Ty]] 9223372036854775807
+; CHECK-DAG: %[[#InfFP64:]] = OpConstant %[[#I64Ty]] 9218868437227405312
+; CHECK-DAG: %[[#NegInfFP64:]] = OpConstant %[[#I64Ty]] 18442240474082181120
+
+; CHECK-DAG: %[[#FalseV4:]] = OpConstantComposite %[[#V4BoolTy]] %[[#False]] %[[#False]] %[[#False]] %[[#False]]
+; CHECK-DAG: %[[#ValueMaskV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#ValueMask]] %[[#ValueMask]] %[[#ValueMask]] %[[#ValueMask]]
+; CHECK-DAG: %[[#InfV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#Inf]] %[[#Inf]] %[[#Inf]] %[[#Inf]]
+; CHECK-DAG: %[[#InfWithQnanBitV4:]] = OpConstantComposite %[[#V4I32Ty]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]] %[[#InfWithQnanBit]]
+; CHECK-DAG: %[[#ValueMaskFP16:]] = OpConstant %[[#I16Ty]] 32767
+; CHECK-DAG: %[[#InfFP16:]] = OpConstant %[[#I16Ty]] 31744
+; CHECK-DAG: %[[#NegInfFP16:]] = OpConstant %[[#I16Ty]] 64512
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: OpReturnValue %[[#False]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_0_none(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 0)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfWithQnanBit]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+
+define i1 @isfpclass_1_issnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 1)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#V4BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#V4FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#V4I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#V4I32Ty]] %[[#T0]] %[[#ValueMaskV4]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#V4BoolTy]] %[[#T1]] %[[#InfV4]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#V4BoolTy]] %[[#T1]] %[[#InfWithQnanBitV4]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#V4BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#V4BoolTy]] %[[#FalseV4]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+
+define <4 x i1> @isfpclass_1_issnan_v4f32(<4 x float> %a) {
+ %v = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %a, i32 1)
+ ret <4 x i1> %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThanEqual %[[#BoolTy]] %[[#T1]] %[[#InfWithQnanBit]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isqnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 2)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isnan(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 3)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_ispinf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 512)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isninf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 4)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpIEqual %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isinf(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 516)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T5:]] = OpLogicalNotEqual %[[#BoolTy]] %[[#T2]] %[[#True]]
+; CHECK: %[[#T6:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T5]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isposnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 256)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T5:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T2]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnegnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 8)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T2]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 264)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpUGreaterThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: %[[#T4:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#ExpLSB]]
+; CHECK: %[[#T5:]] = OpULessThan %[[#BoolTy]] %[[#T4]] %[[#MaxExpMinus1]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#T3]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_1_isnan_or_normal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 267)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpISub %[[#I32Ty]] %[[#T0]] %[[#One]]
+; CHECK: %[[#T2:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#AllOneMantissa]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispsubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 128)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#One]]
+; CHECK: %[[#T4:]] = OpULessThan %[[#BoolTy]] %[[#T3]] %[[#AllOneMantissa]]
+; CHECK: %[[#T5:]] = OpLogicalAnd %[[#BoolTy]] %[[#T4]] %[[#T2]]
+; CHECK: %[[#T6:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T5]]
+; CHECK: OpReturnValue %[[#T6]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnsubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 16)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpISub %[[#I32Ty]] %[[#T1]] %[[#One]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T2]] %[[#AllOneMantissa]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_issubnormal(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 144)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Zero]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispzero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 64)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#SignBit]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnzero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 32)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpIEqual %[[#BoolTy]] %[[#T1]] %[[#Zero]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_iszero(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 96)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpULessThan %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: OpReturnValue %[[#T2]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 448)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: OpReturnValue %[[#T5]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 56)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T3:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T2]]
+; CHECK: OpReturnValue %[[#T3]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isfinite(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 504)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpULessThan %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T2:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalOr %[[#BoolTy]] %[[#T2]] %[[#T3]]
+; CHECK: OpReturnValue %[[#T4]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_ispositive(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 960)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I32Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I32Ty]] %[[#T0]] %[[#ValueMask]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#Inf]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInf]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_isnegative(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 60)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP32Ty]]
+; CHECK: OpReturnValue %[[#True]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_all(float %a) {
+ %v = call i1 @llvm.is.fpclass.f32(float %a, i32 1023)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP64Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I64Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I64Ty]] %[[#T0]] %[[#ValueMaskFP64]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfFP64]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInfFP64]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_f64_isnegative(double %a) {
+ %v = call i1 @llvm.is.fpclass.f64(double %a, i32 60)
+ ret i1 %v
+}
+
+; CHECK: OpFunction %[[#BoolTy]]
+; CHECK: %[[#A:]] = OpFunctionParameter %[[#FP16Ty]]
+; CHECK: %[[#T0:]] = OpBitcast %[[#I16Ty]] %[[#A]]
+; CHECK: %[[#T1:]] = OpBitwiseAnd %[[#I16Ty]] %[[#T0]] %[[#ValueMaskFP16]]
+; CHECK: %[[#T2:]] = OpINotEqual %[[#BoolTy]] %[[#T0]] %[[#T1]]
+; CHECK: %[[#T3:]] = OpULessThan %[[#BoolTy]] %[[#T1]] %[[#InfFP16]]
+; CHECK: %[[#T4:]] = OpLogicalAnd %[[#BoolTy]] %[[#T3]] %[[#T2]]
+; CHECK: %[[#T5:]] = OpLogicalOr %[[#BoolTy]] %[[#False]] %[[#T4]]
+; CHECK: %[[#T6:]] = OpIEqual %[[#BoolTy]] %[[#T0]] %[[#NegInfFP16]]
+; CHECK: %[[#T7:]] = OpLogicalOr %[[#BoolTy]] %[[#T5]] %[[#T6]]
+; CHECK: OpReturnValue %[[#T7]]
+; CHECK: OpFunctionEnd
+define i1 @isfpclass_f16_isnegative(half %a) {
+ %v = call i1 @llvm.is.fpclass.f16(half %a, i32 60)
+ ret i1 %v
+}
+
+declare i1 @llvm.is.fpclass.f32(float, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32)
+declare i1 @llvm.is.fpclass.f64(double, i32)
+declare i1 @llvm.is.fpclass.f16(half, i32)
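The constants in the CHECK-DAG lines above are the usual IEEE-754 bit masks for binary32: 0x7fffffff (value mask), 0x7f800000 (infinity), and 0x7fc00000 (infinity with the quiet bit set). A minimal C sketch of the scalar tests the expansion encodes for the NaN and infinity classes; the helper names are illustrative only:

#include <stdint.h>
#include <string.h>

/* Illustrative only: scalar equivalents of the SPIR-V sequences above. */
static uint32_t bits_of(float x) { uint32_t u; memcpy(&u, &x, sizeof u); return u; }

int is_snan_f32(float x) {  /* mask 1: exponent all ones, quiet bit clear, mantissa nonzero */
  uint32_t abs = bits_of(x) & 0x7fffffffu;
  return abs > 0x7f800000u && abs < 0x7fc00000u;
}

int is_qnan_f32(float x) {  /* mask 2: quiet bit set */
  return (bits_of(x) & 0x7fffffffu) >= 0x7fc00000u;
}

int is_inf_f32(float x) {   /* mask 516: +inf or -inf */
  return (bits_of(x) & 0x7fffffffu) == 0x7f800000u;
}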
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
new file mode 100644
index 0000000..8b5efe7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
@@ -0,0 +1,104 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Float16
+; CHECK-DAG: OpCapability ImageBasic
+; CHECK-DAG: OpCapability ImageReadWrite
+; CHECK-DAG: OpCapability Pipes
+; CHECK-DAG: OpCapability DeviceEnqueue
+
+; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
+; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#HALF:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#PIPE_RD:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#PIPE_WR:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#IMG1D_RD:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown ReadOnly
+; CHECK-DAG: %[[#IMG2D_RD:]] = OpTypeImage %[[#INT]] 2D 0 0 0 0
+; CHECK-DAG: %[[#IMG3D_RD:]] = OpTypeImage %[[#INT]] 3D 0 0 0 0
+; CHECK-DAG: %[[#IMG2DA_RD:]] = OpTypeImage %[[#HALF]] 2D 0 1 0 0
+; CHECK-DAG: %[[#IMG2DD_RD:]] = OpTypeImage %[[#FLOAT]] Buffer 0 0 0
+; CHECK-DAG: %[[#IMG1D_WR:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown WriteOnly
+; CHECK-DAG: %[[#IMG2D_RW:]] = OpTypeImage %[[#VOID]] 2D 0 0 0 0 Unknown ReadWrite
+; CHECK-DAG: %[[#IMG1DB_RD:]] = OpTypeImage %[[#FLOAT]] 2D 1 0 0 0
+
+; CHECK-DAG: %[[#DEVEVENT:]] = OpTypeDeviceEvent
+; CHECK-DAG: %[[#EVENT:]] = OpTypeEvent
+; CHECK-DAG: %[[#QUEUE:]] = OpTypeQueue
+; CHECK-DAG: %[[#RESID:]] = OpTypeReserveId
+; CHECK-DAG: %[[#SAMP:]] = OpTypeSampler
+; CHECK-DAG: %[[#SAMPIMG:]] = OpTypeSampledImage %[[#IMG1DB_RD]]
+
+; CHECK-DAG: %[[#]] = OpFunction %[[#VOID]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG3D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DA_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DD_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RW]]
+
+define spir_kernel void @foo(
+ target("spirv.Pipe", 0) %a,
+ target("spirv.Pipe", 1) %b,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %c1,
+ target("spirv.Image", i32, 1, 0, 0, 0, 0, 0, 0) %d1,
+ target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) %e1,
+ target("spirv.Image", half, 1, 0, 1, 0, 0, 0, 0) %f1,
+ target("spirv.Image", float, 5, 0, 0, 0, 0, 0, 0) %g1,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) %c2,
+ target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 2) %d3) #0 !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+entry:
+ ret void
+}
+
+; CHECK-DAG: %[[#]] = OpFunction
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#DEVEVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#EVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#QUEUE]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#RESID]]
+
+; CHECK-DAG: %[[#IMARG:]] = OpFunctionParameter %[[#IMG1DB_RD]]
+; CHECK-DAG: %[[#SAMARG:]] = OpFunctionParameter %[[#SAMP]]
+; CHECK-DAG: %[[#SAMPIMVAR:]] = OpSampledImage %[[#SAMPIMG]] %[[#IMARG]] %[[#SAMARG]]
+; CHECK-DAG: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SAMPIMVAR]]
+
+define spir_func void @bar(
+ target("spirv.DeviceEvent") %a,
+ target("spirv.Event") %b,
+ target("spirv.Queue") %c,
+ target("spirv.ReserveId") %d) {
+ ret void
+}
+
+define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce,
+ target("spirv.Sampler") %s.coerce) {
+ %1 = tail call spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce, target("spirv.Sampler") %s.coerce) #1
+ %2 = tail call spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) %1, <4 x i32> zeroinitializer, i32 2, float 1.000000e+00) #1
+ ret void
+}
+
+declare spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler"))
+
+declare spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0), <4 x i32>, i32, float)
+
+attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!opencl.enable.FP_CONTRACT = !{}
+!opencl.spir.version = !{!6}
+!opencl.ocl.version = !{!7}
+!opencl.used.extensions = !{!8}
+!opencl.used.optional.core.features = !{!9}
+!opencl.compiler.options = !{!8}
+
+!1 = !{i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1}
+!2 = !{!"read_only", !"write_only", !"read_only", !"read_only", !"read_only", !"read_only", !"read_only", !"write_only", !"read_write"}
+!3 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!4 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!5 = !{!"pipe", !"pipe", !"", !"", !"", !"", !"", !"", !""}
+!6 = !{i32 1, i32 2}
+!7 = !{i32 2, i32 0}
+!8 = !{!"cl_khr_fp16"}
+!9 = !{!"cl_images"}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
new file mode 100644
index 0000000..63b2604
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
@@ -0,0 +1,19 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Fun:]] "_Z3booi"
+; CHECK-DAG: OpDecorate %[[#Param:]] FuncParamAttr Sret
+; CHECK-DAG: %[[#PtrTy:]] = OpTypePointer Function %[[#StructTy:]]
+; CHECK-DAG: %[[#StructTy]] = OpTypeStruct
+; CHECK: %[[#Fun]] = OpFunction %[[#]]
+; CHECK: %[[#Param]] = OpFunctionParameter %[[#PtrTy]]
+
+%struct.Example = type { }
+
+define spir_func i32 @foo() {
+ %1 = alloca %struct.Example, align 8
+ call void @_Z3booi(ptr sret(%struct.Example) align 8 %1, i32 noundef 42)
+ ret i32 0
+}
+
+declare void @_Z3booi(ptr sret(%struct.Example) align 8, i32 noundef)
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
index 73c7a8d..5835616 100644
--- a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
@@ -3,6 +3,23 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; Test a v16i8 -> v8i16 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f1_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhb %v1, %v26
+; CHECK-NEXT: vmlhw %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext1 = zext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext2 = zext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %zext1, %zext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 unsigned widening multiplication.
define <8 x i16> @f1(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f1:
@@ -31,6 +48,23 @@ define <8 x i16> @f2(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v16i8 -> v8i16 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f3_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphb %v0, %v26
+; CHECK-NEXT: vuphb %v1, %v24
+; CHECK-NEXT: vmlhw %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext1 = sext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext2 = sext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %sext1, %sext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 signed widening multiplication.
define <8 x i16> @f3(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f3:
@@ -59,6 +93,23 @@ define <8 x i16> @f4(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v8i16 -> v4i32 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f5_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhh %v0, %v24
+; CHECK-NEXT: vuplhh %v1, %v26
+; CHECK-NEXT: vmlf %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext1 = zext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext2 = zext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %zext1, %zext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 unsigned widening multiplication.
define <4 x i32> @f5(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f5:
@@ -87,6 +138,23 @@ define <4 x i32> @f6(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v8i16 -> v4i32 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f7_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphh %v0, %v26
+; CHECK-NEXT: vuphh %v1, %v24
+; CHECK-NEXT: vmlf %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext1 = sext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext2 = sext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %sext1, %sext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 signed widening multiplication.
define <4 x i32> @f7(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f7:
@@ -115,6 +183,29 @@ define <4 x i32> @f8(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v4i32 -> v2i64 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f9_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhf %v0, %v24
+; CHECK-NEXT: vuplhf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext1 = zext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext2 = zext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %zext1, %zext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 unsigned widening multiplication.
define <2 x i64> @f9(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f9:
@@ -143,6 +234,29 @@ define <2 x i64> @f10(<4 x i32> %val1, <4 x i32> %val2) {
ret <2 x i64> %ret
}
+; Test a v4i32 -> v2i64 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f11_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphf %v0, %v24
+; CHECK-NEXT: vuphf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext1 = sext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext2 = sext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %sext1, %sext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 signed widening multiplication.
define <2 x i64> @f11(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f11:
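The new *_not tests multiply the low halves of the inputs (lanes 0..N/2-1), which is not the lane pattern the even/odd widening multiply instructions implement, so the backend unpacks and uses a plain multiply instead. A minimal C sketch of the two patterns for the v16i8 case; the function names are illustrative only:

#include <stdint.h>

/* Illustrative only: foldable even-lane pattern vs. the low-half pattern
 * used by f1_not above. */
void widen_mul_even(const uint8_t a[16], const uint8_t b[16], uint16_t out[8]) {
  for (int i = 0; i < 8; ++i)
    out[i] = (uint16_t)a[2 * i] * (uint16_t)b[2 * i];
}

void widen_mul_low_half(const uint8_t a[16], const uint8_t b[16], uint16_t out[8]) {
  for (int i = 0; i < 8; ++i)
    out[i] = (uint16_t)a[i] * (uint16_t)b[i];
}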
diff --git a/llvm/test/CodeGen/WebAssembly/narrow-simd-mul.ll b/llvm/test/CodeGen/WebAssembly/narrow-simd-mul.ll
new file mode 100644
index 0000000..1f6c960
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/narrow-simd-mul.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=wasm32 -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s
+
+define <8 x i8> @mul_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: mul_v8i8:
+; CHECK: .functype mul_v8i8 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i8x16.extract_lane_u $push4=, $0, 0
+; CHECK-NEXT: i8x16.extract_lane_u $push3=, $1, 0
+; CHECK-NEXT: i32.mul $push5=, $pop4, $pop3
+; CHECK-NEXT: i8x16.splat $push6=, $pop5
+; CHECK-NEXT: i8x16.extract_lane_u $push1=, $0, 1
+; CHECK-NEXT: i8x16.extract_lane_u $push0=, $1, 1
+; CHECK-NEXT: i32.mul $push2=, $pop1, $pop0
+; CHECK-NEXT: i8x16.replace_lane $push7=, $pop6, 1, $pop2
+; CHECK-NEXT: i8x16.extract_lane_u $push9=, $0, 2
+; CHECK-NEXT: i8x16.extract_lane_u $push8=, $1, 2
+; CHECK-NEXT: i32.mul $push10=, $pop9, $pop8
+; CHECK-NEXT: i8x16.replace_lane $push11=, $pop7, 2, $pop10
+; CHECK-NEXT: i8x16.extract_lane_u $push13=, $0, 3
+; CHECK-NEXT: i8x16.extract_lane_u $push12=, $1, 3
+; CHECK-NEXT: i32.mul $push14=, $pop13, $pop12
+; CHECK-NEXT: i8x16.replace_lane $push15=, $pop11, 3, $pop14
+; CHECK-NEXT: i8x16.extract_lane_u $push17=, $0, 4
+; CHECK-NEXT: i8x16.extract_lane_u $push16=, $1, 4
+; CHECK-NEXT: i32.mul $push18=, $pop17, $pop16
+; CHECK-NEXT: i8x16.replace_lane $push19=, $pop15, 4, $pop18
+; CHECK-NEXT: i8x16.extract_lane_u $push21=, $0, 5
+; CHECK-NEXT: i8x16.extract_lane_u $push20=, $1, 5
+; CHECK-NEXT: i32.mul $push22=, $pop21, $pop20
+; CHECK-NEXT: i8x16.replace_lane $push23=, $pop19, 5, $pop22
+; CHECK-NEXT: i8x16.extract_lane_u $push25=, $0, 6
+; CHECK-NEXT: i8x16.extract_lane_u $push24=, $1, 6
+; CHECK-NEXT: i32.mul $push26=, $pop25, $pop24
+; CHECK-NEXT: i8x16.replace_lane $push27=, $pop23, 6, $pop26
+; CHECK-NEXT: i8x16.extract_lane_u $push29=, $0, 7
+; CHECK-NEXT: i8x16.extract_lane_u $push28=, $1, 7
+; CHECK-NEXT: i32.mul $push30=, $pop29, $pop28
+; CHECK-NEXT: i8x16.replace_lane $push31=, $pop27, 7, $pop30
+; CHECK-NEXT: i8x16.extract_lane_u $push33=, $0, 8
+; CHECK-NEXT: i8x16.extract_lane_u $push32=, $1, 8
+; CHECK-NEXT: i32.mul $push34=, $pop33, $pop32
+; CHECK-NEXT: i8x16.replace_lane $push35=, $pop31, 8, $pop34
+; CHECK-NEXT: i8x16.extract_lane_u $push37=, $0, 9
+; CHECK-NEXT: i8x16.extract_lane_u $push36=, $1, 9
+; CHECK-NEXT: i32.mul $push38=, $pop37, $pop36
+; CHECK-NEXT: i8x16.replace_lane $push39=, $pop35, 9, $pop38
+; CHECK-NEXT: i8x16.extract_lane_u $push41=, $0, 10
+; CHECK-NEXT: i8x16.extract_lane_u $push40=, $1, 10
+; CHECK-NEXT: i32.mul $push42=, $pop41, $pop40
+; CHECK-NEXT: i8x16.replace_lane $push43=, $pop39, 10, $pop42
+; CHECK-NEXT: i8x16.extract_lane_u $push45=, $0, 11
+; CHECK-NEXT: i8x16.extract_lane_u $push44=, $1, 11
+; CHECK-NEXT: i32.mul $push46=, $pop45, $pop44
+; CHECK-NEXT: i8x16.replace_lane $push47=, $pop43, 11, $pop46
+; CHECK-NEXT: i8x16.extract_lane_u $push49=, $0, 12
+; CHECK-NEXT: i8x16.extract_lane_u $push48=, $1, 12
+; CHECK-NEXT: i32.mul $push50=, $pop49, $pop48
+; CHECK-NEXT: i8x16.replace_lane $push51=, $pop47, 12, $pop50
+; CHECK-NEXT: i8x16.extract_lane_u $push53=, $0, 13
+; CHECK-NEXT: i8x16.extract_lane_u $push52=, $1, 13
+; CHECK-NEXT: i32.mul $push54=, $pop53, $pop52
+; CHECK-NEXT: i8x16.replace_lane $push55=, $pop51, 13, $pop54
+; CHECK-NEXT: i8x16.extract_lane_u $push57=, $0, 14
+; CHECK-NEXT: i8x16.extract_lane_u $push56=, $1, 14
+; CHECK-NEXT: i32.mul $push58=, $pop57, $pop56
+; CHECK-NEXT: i8x16.replace_lane $push59=, $pop55, 14, $pop58
+; CHECK-NEXT: i8x16.extract_lane_u $push61=, $0, 15
+; CHECK-NEXT: i8x16.extract_lane_u $push60=, $1, 15
+; CHECK-NEXT: i32.mul $push62=, $pop61, $pop60
+; CHECK-NEXT: i8x16.replace_lane $push63=, $pop59, 15, $pop62
+; CHECK-NEXT: return $pop63
+ %mul = mul <8 x i8> %a, %b
+ ret <8 x i8> %mul
+}
+
+define <4 x i16> @mul_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: mul_v4i16:
+; CHECK: .functype mul_v4i16 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i16x8.mul $push0=, $0, $1
+; CHECK-NEXT: return $pop0
+ %mul = mul <4 x i16> %a, %b
+ ret <4 x i16> %mul
+}
+
+define <2 x i32> @mul_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: mul_v2i32:
+; CHECK: .functype mul_v2i32 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: i32x4.mul $push0=, $0, $1
+; CHECK-NEXT: return $pop0
+ %mul = mul <2 x i32> %a, %b
+ ret <2 x i32> %mul
+}
diff --git a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
index 60cfc27..4a4973b 100644
--- a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
+++ b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
@@ -68,9 +68,9 @@ target triple = "wasm32-unknown-unknown"
; bleeding-edge: +atomics, +bulk-memory, +bulk-memory-opt,
; +call-indirect-overlong, +exception-handling,
-; +extended-const, +fp16, +multimemory, +multivalue,
+; +extended-const, +fp16, +gc, +multimemory, +multivalue,
; +mutable-globals, +nontrapping-fptoint, +relaxed-simd,
-; +reference-types, +simd128, +sign-ext, +tail-call, +gc
+; +reference-types, +simd128, +sign-ext, +tail-call
; BLEEDING-EDGE-LABEL: .section .custom_section.target_features,"",@
; BLEEDING-EDGE-NEXT: .int8 17
; BLEEDING-EDGE-NEXT: .int8 43
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
index ba72c4f..bbb09c6 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
@@ -10,18 +10,18 @@ body: |
bb.0:
; X32-LABEL: name: test_memop_s8tos32
; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
- ; X32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load (s1))
- ; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load (s8))
- ; X32: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load (s16))
- ; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32))
- ; X32: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load (p0))
- ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
- ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[LOAD]], [[C]]
- ; X32: G_STORE [[AND]](s8), [[DEF]](p0) :: (store (s8))
- ; X32: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store (s8))
- ; X32: G_STORE [[LOAD2]](s16), [[DEF]](p0) :: (store (s16))
- ; X32: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store (s32))
- ; X32: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store (p0))
+ ; X32-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load (s1))
+ ; X32-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load (s8))
+ ; X32-NEXT: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load (s16))
+ ; X32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32))
+ ; X32-NEXT: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load (p0))
+ ; X32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; X32-NEXT: [[AND:%[0-9]+]]:_(s8) = G_AND [[LOAD]], [[C]]
+ ; X32-NEXT: G_STORE [[AND]](s8), [[DEF]](p0) :: (store (s8))
+ ; X32-NEXT: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store (s8))
+ ; X32-NEXT: G_STORE [[LOAD2]](s16), [[DEF]](p0) :: (store (s16))
+ ; X32-NEXT: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store (s32))
+ ; X32-NEXT: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store (p0))
%0:_(p0) = IMPLICIT_DEF
%9:_(s1) = G_LOAD %0 :: (load (s1))
%1:_(s8) = G_LOAD %0 :: (load (s8))
@@ -46,13 +46,13 @@ body: |
; X32-LABEL: name: test_memop_s64
; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
- ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32), align 8)
- ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; X32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
- ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
- ; X32: G_STORE [[LOAD]](s32), [[DEF]](p0) :: (store (s32), align 8)
- ; X32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
- ; X32: G_STORE [[LOAD1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 4)
+ ; X32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32), align 8)
+ ; X32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; X32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s32)
+ ; X32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
+ ; X32-NEXT: G_STORE [[LOAD]](s32), [[DEF]](p0) :: (store (s32), align 8)
+ ; X32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[DEF]], [[C]](s32)
+ ; X32-NEXT: G_STORE [[LOAD1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 4)
%0:_(p0) = IMPLICIT_DEF
%1:_(s64) = G_LOAD %0 :: (load (s64))
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
index 8711d84..b16fe3e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
@@ -21,6 +21,7 @@ body: |
; X64-NEXT: G_STORE [[DEF3]](s32), [[DEF]](p0) :: (store (s32))
; X64-NEXT: [[DEF4:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; X64-NEXT: G_STORE [[DEF4]](s64), [[DEF]](p0) :: (store (s64))
+ ;
; X32-LABEL: name: test_implicit_def
; X32: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; X32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
@@ -35,7 +36,7 @@ body: |
; X32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF4]](s64)
; X32-NEXT: G_STORE [[UV]](s32), [[DEF]](p0) :: (store (s32), align 8)
; X32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; X32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C1]](s32)
+ ; X32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[DEF]], [[C1]](s32)
; X32-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
%5:_(p0) = G_IMPLICIT_DEF
%0:_(s1) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
index 99d458a..83c319b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
@@ -164,12 +164,12 @@ define void @f5(ptr %a, ptr %b) {
; X86-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
; X86-NEXT: [[LOAD2:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.a, align 8)
; X86-NEXT: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 4
- ; X86-NEXT: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[LOAD]], [[C]](s32)
+ ; X86-NEXT: [[PTR_ADD:%[0-9]+]]:gpr(p0) = nuw inbounds G_PTR_ADD [[LOAD]], [[C]](s32)
; X86-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY [[PTR_ADD]](p0)
; X86-NEXT: [[LOAD3:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
; X86-NEXT: [[MV:%[0-9]+]]:gpr(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; X86-NEXT: [[LOAD4:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.b, align 8)
- ; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[LOAD1]], [[C]](s32)
+ ; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = nuw inbounds G_PTR_ADD [[LOAD1]], [[C]](s32)
; X86-NEXT: [[LOAD5:%[0-9]+]]:gpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
; X86-NEXT: [[MV1:%[0-9]+]]:gpr(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; X86-NEXT: [[COPY1:%[0-9]+]]:psr(s64) = COPY [[MV]](s64)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
index 171ccb2..2f1f8bc 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
@@ -77,12 +77,12 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
; ALL-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
; ALL-NEXT: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.1)
; ALL-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; ALL-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
- ; ALL-NEXT: G_STORE [[COPY1]](s64), %5(p0) :: (store (s64) into %ir.2)
+ ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
+ ; ALL-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.2)
; ALL-NEXT: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.3, align 8), (load (s8) from %ir.4, align 8)
; ALL-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.5)
- ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
- ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.5 + 8)
+ ; ALL-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load (s64) from %ir.5 + 8)
; ALL-NEXT: $xmm0 = COPY [[LOAD]](s64)
; ALL-NEXT: $xmm1 = COPY [[LOAD1]](s64)
; ALL-NEXT: RET 0, implicit $xmm0, implicit $xmm1
@@ -170,14 +170,14 @@ define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
; ALL-NEXT: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3.tmp
; ALL-NEXT: G_STORE [[COPY]](s64), [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.0, align 4)
; ALL-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; ALL-NEXT: %7:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX2]], [[C1]](s64)
- ; ALL-NEXT: G_STORE [[COPY1]](s32), %7(p0) :: (store (s32) into %ir.1)
+ ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX2]], [[C1]](s64)
+ ; ALL-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.1)
; ALL-NEXT: G_MEMCPY [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store (s8) into %ir.2, align 4), (load (s8) from %ir.3, align 4)
; ALL-NEXT: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.4, align 4), (load (s8) from %ir.5, align 4)
; ALL-NEXT: G_MEMCPY [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store (s8) into %ir.6, align 8), (load (s8) from %ir.7, align 4)
; ALL-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (dereferenceable load (s64) from %ir.tmp)
- ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s64)
- ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.tmp + 8, align 8)
+ ; ALL-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s64)
+ ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load (s32) from %ir.tmp + 8, align 8)
; ALL-NEXT: $rax = COPY [[LOAD]](s64)
; ALL-NEXT: $edx = COPY [[LOAD1]](s32)
; ALL-NEXT: RET 0, implicit $rax, implicit $edx
@@ -215,12 +215,12 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
; ALL-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL-NEXT: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.1, align 4)
; ALL-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
- ; ALL-NEXT: %5:_(p0) = nuw nusw G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
- ; ALL-NEXT: G_STORE [[COPY1]](s64), %5(p0) :: (store (s64) into %ir.2, align 4)
+ ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
+ ; ALL-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.2, align 4)
; ALL-NEXT: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.3, align 4), (load (s8) from %ir.4, align 4)
; ALL-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.5, align 4)
- ; ALL-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
- ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.5 + 8, align 4)
+ ; ALL-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
+ ; ALL-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load (s64) from %ir.5 + 8, align 4)
; ALL-NEXT: $rax = COPY [[LOAD]](s64)
; ALL-NEXT: $rdx = COPY [[LOAD1]](s64)
; ALL-NEXT: RET 0, implicit $rax, implicit $rdx
diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll
index 1e4ac3f..e52ce6c 100644
--- a/llvm/test/CodeGen/X86/apx/cf.ll
+++ b/llvm/test/CodeGen/X86/apx/cf.ll
@@ -162,7 +162,7 @@ entry:
define void @load_zext(i1 %cond, ptr %b, ptr %p) {
; CHECK-LABEL: load_zext:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: cfcmovnew (%rsi), %ax
; CHECK-NEXT: movzwl %ax, %eax
; CHECK-NEXT: cfcmovnel %eax, (%rdx)
@@ -180,7 +180,7 @@ entry:
define void @load_sext(i1 %cond, ptr %b, ptr %p) {
; CHECK-LABEL: load_sext:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: cfcmovnel (%rsi), %eax
; CHECK-NEXT: cltq
; CHECK-NEXT: cfcmovneq %rax, (%rdx)
@@ -194,3 +194,38 @@ entry:
call void @llvm.masked.store.v1i64.p0(<1 x i64> %3, ptr %p, i32 4, <1 x i1> %0)
ret void
}
+
+define void @sink_gep(ptr %p, i1 %cond) {
+; CHECK-LABEL: sink_gep:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovnel %eax, 112(%rdi)
+; CHECK-NEXT: cfcmovnel 112(%rdi), %eax
+; CHECK-NEXT: movl %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = getelementptr i8, ptr %p, i64 112
+ br label %next
+
+next:
+ %1 = bitcast i1 %cond to <1 x i1>
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %0, i32 1, <1 x i1> %1)
+ %2 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %0, i32 1, <1 x i1> %1, <1 x i32> zeroinitializer)
+ store <1 x i32> %2, ptr %p, align 4
+ ret void
+}
+
+define void @xor_cond(ptr %p, i1 %cond) {
+; CHECK-LABEL: xor_cond:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovel %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = xor i1 %cond, true
+ %1 = insertelement <1 x i1> zeroinitializer, i1 %0, i64 0
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %p, i32 1, <1 x i1> %1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
new file mode 100644
index 0000000..1136287
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -0,0 +1,43 @@
+;; Test if temporary labels are generated for each indirect callsite with callee_type metadata.
+;; Test if the .callgraph section contains the MD5 hash of callee type ids generated from
+;; generalized type id strings.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -o - < %s | FileCheck %s
+
+; CHECK: ball:
+; CHECK-NEXT: [[LABEL_FUNC:\.Lfunc_begin[0-9]+]]:
+define ptr @ball() {
+entry:
+ %fp_foo_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP0:\.L.*]]:
+ call void (...) %fp_foo_val(), !callee_type !0
+ %fp_bar_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP1:\.L.*]]:
+ %call_fp_bar = call i32 %fp_bar_val(i8 0), !callee_type !2
+ %fp_baz_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP2:\.L.*]]:
+ %call_fp_baz = call ptr %fp_baz_val(ptr null), !callee_type !4
+ ret ptr %call_fp_baz
+}
+
+; CHECK: .section .callgraph,"o",@progbits,.text
+
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad [[LABEL_FUNC]]
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad 3
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvE.generalized"}
+;; Test for MD5 hash of _ZTSFvE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 4524972987496481828
+; CHECK-NEXT: .quad [[LABEL_TMP0]]
+!2 = !{!3}
+!3 = !{i64 0, !"_ZTSFicE.generalized"}
+;; Test for MD5 hash of _ZTSFicE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 3498816979441845844
+; CHECK-NEXT: .quad [[LABEL_TMP1]]
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
+;; Test for MD5 hash of _ZTSFPvS_E.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 8646233951371320954
+; CHECK-NEXT: .quad [[LABEL_TMP2]]
diff --git a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
new file mode 100644
index 0000000..fa14a98
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
@@ -0,0 +1,34 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file for tail calls.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+define i32 @main(i32 %argc) !type !3 {
+entry:
+ %andop = and i32 %argc, 1
+ %cmp = icmp eq i32 %andop, 0
+ %foo.bar = select i1 %cmp, ptr @foo, ptr @bar
+ %call.i = tail call i32 %foo.bar(i8 signext 97), !callee_type !1
+ ret i32 %call.i
+}
+
+declare !type !2 i32 @foo(i8 signext)
+
+declare !type !2 i32 @bar(i8 signext)
+
+;; Check that the numeric type ids (MD5 hashes) for the type ids below are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{i64 0, !"_ZTSFiiE.generalized"}
diff --git a/llvm/test/CodeGen/X86/call-graph-section.ll b/llvm/test/CodeGen/X86/call-graph-section.ll
new file mode 100644
index 0000000..4a9840e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section.ll
@@ -0,0 +1,38 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+declare !type !0 void @foo()
+
+declare !type !1 i32 @bar(i8)
+
+declare !type !2 ptr @baz(ptr)
+
+define void @main() {
+entry:
+ %a = alloca i8, align 1
+ %fp_foo_val = load ptr, ptr null, align 8
+ call void (...) %fp_foo_val(), !callee_type !1
+ %fp_bar_val = load ptr, ptr null, align 8
+ %param = trunc i64 0 to i8
+ %call_fp_bar = call i32 %fp_bar_val(i8 signext %param), !callee_type !3
+ %fp_baz_val = load ptr, ptr null, align 8
+ %call_fp_baz = call ptr %fp_baz_val(ptr %a), !callee_type !4
+ ret void
+}
+
+;; Check that the numeric type ids (MD5 hashes) for the type ids below are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+; CHECK-DAG: 2444f731 f5eecb3e
+!0 = !{i64 0, !"_ZTSFvE.generalized"}
+!1 = !{!0}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{!2}
+; CHECK-DAG: 7ade6814 f897fd77
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
diff --git a/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..7881ea7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites are safely ignored.
+
+; RUN: llc --call-graph-section -mtriple x86_64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+  ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+  ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..8f6b7a6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..e97a6ac
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll
new file mode 100644
index 0000000..ea7454f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=x86_64-grtev4-linux-gnu < %s | FileCheck %s
+
+%struct.wibble = type { %struct.wombat }
+%struct.wombat = type { %struct.ham, [3 x i8] }
+%struct.ham = type { %struct.zot }
+%struct.zot = type { %struct.blam }
+%struct.blam = type { %struct.ham.0 }
+%struct.ham.0 = type { %struct.bar }
+%struct.bar = type { %struct.bar.1 }
+%struct.bar.1 = type { %struct.baz, i8 }
+%struct.baz = type { %struct.snork }
+%struct.snork = type <{ %struct.spam, i8, [3 x i8] }>
+%struct.spam = type { %struct.snork.2, %struct.snork.2 }
+%struct.snork.2 = type { i32 }
+%struct.snork.3 = type { %struct.baz, i8, [3 x i8] }
+
+define void @foo(ptr %arg, ptr %arg1, i40 %arg2, ptr %arg3, i32 %arg4) #0 {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: movl %r8d, %r14d
+; CHECK-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movq %rsi, %r13
+; CHECK-NEXT: movq %rdi, %r15
+; CHECK-NEXT: incl %r14d
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: # implicit-def: $r12
+; CHECK-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_1: # %bb17
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movq %r15, %r13
+; CHECK-NEXT: xorl %r15d, %r15d
+; CHECK-NEXT: testq %rbx, %rbx
+; CHECK-NEXT: sete %r15b
+; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: callq _Znwm@PLT
+; CHECK-NEXT: shll $4, %r15d
+; CHECK-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; CHECK-NEXT: movq %r12, %rcx
+; CHECK-NEXT: shrq $32, %rcx
+; CHECK-NEXT: movb %cl, 12(%rax)
+; CHECK-NEXT: movl %r12d, 8(%rax)
+; CHECK-NEXT: movq %r15, %rbx
+; CHECK-NEXT: movq %r13, %r15
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; CHECK-NEXT: decl %r14d
+; CHECK-NEXT: je .LBB0_8
+; CHECK-NEXT: .LBB0_3: # %bb7
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: callq widget@PLT
+; CHECK-NEXT: cmpb $-5, (%r13)
+; CHECK-NEXT: jae .LBB0_5
+; CHECK-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movl %r12d, %r12d
+; CHECK-NEXT: cmpq %r15, %rbx
+; CHECK-NEXT: jbe .LBB0_1
+; CHECK-NEXT: jmp .LBB0_7
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_5: # %bb12
+; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: movq 0, %rax
+; CHECK-NEXT: movq 8, %rax
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; CHECK-NEXT: cmpq %r15, %rbx
+; CHECK-NEXT: jbe .LBB0_1
+; CHECK-NEXT: .LBB0_7: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: decl %r14d
+; CHECK-NEXT: jne .LBB0_3
+; CHECK-NEXT: .LBB0_8: # %bb21
+; CHECK-NEXT: cmpb $0, 12(%rax)
+; CHECK-NEXT: jne .LBB0_10
+; CHECK-NEXT: # %bb.9: # %bb26
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_10: # %bb25
+; CHECK-NEXT: .cfi_def_cfa %rbp, 16
+; CHECK-NEXT: movq %r15, %rdi
+; CHECK-NEXT: callq pluto@PLT
+bb:
+ br label %bb7
+
+bb5: ; preds = %bb17, %bb14
+ %phi = phi ptr [ %call19, %bb17 ], [ null, %bb14 ]
+ %phi6 = phi ptr [ %getelementptr, %bb17 ], [ null, %bb14 ]
+ %add = add i32 %phi9, 1
+ %icmp = icmp eq i32 %phi9, %arg4
+ br i1 %icmp, label %bb21, label %bb7
+
+bb7: ; preds = %bb5, %bb
+ %phi8 = phi ptr [ null, %bb ], [ %phi6, %bb5 ]
+ %phi9 = phi i32 [ 0, %bb ], [ %add, %bb5 ]
+ %phi10 = phi i40 [ poison, %bb ], [ %phi15, %bb5 ]
+ %call = call ptr @widget()
+ %load = load i8, ptr %arg1, align 8
+ %icmp11 = icmp ult i8 %load, -5
+ %and = and i40 %phi10, 4294967295
+ br i1 %icmp11, label %bb14, label %bb12
+
+bb12: ; preds = %bb7
+ %load13 = load volatile { i64, i64 }, ptr null, align 4294967296
+ br label %bb14
+
+bb14: ; preds = %bb12, %bb7
+ %phi15 = phi i40 [ %and, %bb7 ], [ %arg2, %bb12 ]
+ %icmp16 = icmp ugt ptr %phi8, %arg
+ br i1 %icmp16, label %bb5, label %bb17
+
+bb17: ; preds = %bb14
+ %icmp18 = icmp eq ptr %phi8, null
+ %zext = zext i1 %icmp18 to i64
+ %call19 = call ptr @_Znwm(i64 0)
+ %getelementptr = getelementptr %struct.wibble, ptr %arg3, i64 %zext
+ %getelementptr20 = getelementptr i8, ptr %call19, i64 8
+ store i40 %phi15, ptr %getelementptr20, align 4
+ br label %bb5
+
+bb21: ; preds = %bb5
+ %getelementptr22 = getelementptr %struct.snork.3, ptr %phi, i64 0, i32 1
+ %load23 = load i8, ptr %getelementptr22, align 4
+ %icmp24 = icmp eq i8 %load23, 0
+ br i1 %icmp24, label %bb26, label %bb25
+
+bb25: ; preds = %bb21
+ call void @pluto(ptr %arg)
+ unreachable
+
+bb26: ; preds = %bb21
+ ret void
+}
+
+define void @eggs(ptr %arg, ptr %arg1) {
+; CHECK-LABEL: eggs:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: movq %rax, %rsi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: xorl %r8d, %r8d
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+bb:
+ call void @foo(ptr %arg1, ptr %arg, i40 0, ptr null, i32 0)
+ ret void
+}
+
+declare ptr @widget()
+
+declare void @pluto(ptr)
+
+declare ptr @_Znwm(i64)
+
+attributes #0 = { noinline "frame-pointer"="all" }
diff --git a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
index 8241a17..0bc208d 100644
--- a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
+++ b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
-# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=register-coalescer -o - %s | FileCheck %s
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines
---
name: rematerialize_subreg_to_reg_added_impdef_1
tracksRegLiveness: true
@@ -9,7 +9,7 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x2aaaaaab), %bb.2(0x55555555)
; CHECK-NEXT: liveins: $edi
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
; CHECK-NEXT: JCC_1 %bb.2, 5, implicit killed undef $eflags
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
@@ -28,7 +28,7 @@ body: |
; CHECK-NEXT: JCC_1 %bb.5, 5, implicit killed undef $eflags
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4:
- ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al
+ ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al, implicit-def $al
; CHECK-NEXT: RET 0, killed undef $al
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.5:
diff --git a/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir b/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir
new file mode 100644
index 0000000..2e6395f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -enable-subreg-liveness -verify-coalescing -o - %s | FileCheck %s
+
+---
+name: requires_new_subrange_coalesce_subreg_to_reg
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: requires_new_subrange_coalesce_subreg_to_reg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = COPY $eax
+ ; CHECK-NEXT: %b:gr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit
+ ; CHECK-NEXT: JCC_1 %bb.2, 4, implicit undef $eflags
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: %c.sub_32bit:gr64 = COPY %a
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: %c.sub_32bit:gr64 = SUBREG_TO_REG %a, %b, %subreg.sub_32bit
+ ; CHECK-NEXT: RET 0, implicit %c
+ bb.0:
+ liveins: $eax
+ %init_eax:gr32 = COPY $eax
+ %a:gr64 = SUBREG_TO_REG 0, %init_eax, %subreg.sub_32bit
+ %b:gr32 = IMPLICIT_DEF
+ %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit
+ JCC_1 %bb.2, 4, implicit undef $eflags
+
+ bb.1:
+ %imm0:gr32 = MOV32r0 implicit-def dead $eflags
+ %a = SUBREG_TO_REG 0, %imm0, %subreg.sub_32bit
+ %c.sub_32bit = COPY %a
+
+ bb.2:
+ %c.sub_32bit = SUBREG_TO_REG %a, %b, %subreg.sub_32bit
+ RET 0, implicit %c
+
+...
diff --git a/llvm/test/CodeGen/X86/combine-add-ssat.ll b/llvm/test/CodeGen/X86/combine-add-ssat.ll
index 3e21798..75adcdd 100644
--- a/llvm/test/CodeGen/X86/combine-add-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-ssat.ll
@@ -62,12 +62,12 @@ define <8 x i16> @combine_constfold_v8i16() {
define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,u,65534,0,65280,32768,0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65535,u,65534,0,65280,32768,0]
; AVX-NEXT: retq
%res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-add-usat.ll b/llvm/test/CodeGen/X86/combine-add-usat.ll
index 13bc3b2..5b947dd 100644
--- a/llvm/test/CodeGen/X86/combine-add-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-usat.ll
@@ -62,12 +62,13 @@ define <8 x i16> @combine_constfold_v8i16() {
define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,u,65535,65535,65535,2,65535]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = [65535,65535,2,65535,65535,65535,2,65535]
+; AVX-NEXT: # xmm0 = mem[0,0]
; AVX-NEXT: retq
%res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-sub-ssat.ll b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
index 979331f..0dab025 100644
--- a/llvm/test/CodeGen/X86/combine-sub-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
@@ -62,12 +62,12 @@ define <8 x i16> @combine_constfold_v8i16() {
define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65534,65282,32786,2]
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,u,0,65534,65282,32786,2]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,65282,32786,2]
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,u,0,65534,65282,32786,2]
; AVX-NEXT: retq
%res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-sub-usat.ll b/llvm/test/CodeGen/X86/combine-sub-usat.ll
index b70e3fc..36e374b 100644
--- a/llvm/test/CodeGen/X86/combine-sub-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-usat.ll
@@ -73,17 +73,17 @@ define <8 x i16> @combine_constfold_v8i16() {
define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,u,0,65534,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_constfold_undef_v8i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,u,0,65534,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_constfold_undef_v8i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65534,0,0,0]
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,u,0,65534,0,0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_constfold_undef_v8i16:
diff --git a/llvm/test/CodeGen/X86/constant-pool-partition.ll b/llvm/test/CodeGen/X86/constant-pool-partition.ll
index 515284f..e42b41b 100644
--- a/llvm/test/CodeGen/X86/constant-pool-partition.ll
+++ b/llvm/test/CodeGen/X86/constant-pool-partition.ll
@@ -24,11 +24,11 @@ target triple = "x86_64-grtev4-linux-gnu"
; RUN: %s -o - 2>&1 | FileCheck %s --dump-input=always
;; For function @cold_func
-; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8
+; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI0_0:
; CHECK-NEXT: .quad 0x3fe5c28f5c28f5c3 # double 0.68000000000000005
-; CHECK-NEXT: .section .rodata.cst8.unlikely,"aM",@progbits,8
+; CHECK-NEXT: .section .rodata.cst8.unlikely.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI0_1:
; CHECK-NEXT: .quad 0x3eb0000000000000 # double 9.5367431640625E-7
@@ -50,11 +50,11 @@ target triple = "x86_64-grtev4-linux-gnu"
; CHECK-NEXT: .long 0x3e000000 # float 0.125
;; For function @hot_func
-; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8
+; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI3_0:
; CHECK-NEXT: .quad 0x3fe5c28f5c28f5c3 # double 0.68000000000000005
-; CHECK-NEXT: .section .rodata.cst16.hot,"aM",@progbits,16
+; CHECK-NEXT: .section .rodata.cst16.hot.,"aM",@progbits,16
; CHECK-NEXT: .p2align
; CHECK-NEXT: .LCPI3_1:
; CHECK-NEXT: .long 2147483648 # 0x80000000
diff --git a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
index 17de405..0f28964 100644
--- a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir
+++ b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
@@ -1,6 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=early-tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
-# Check that only the computed goto is not be restrict by tail-dup-pred-size and tail-dup-succ-size.
+#
+# Check that the computed goto and other branches are restricted by tail-dup-pred-size and tail-dup-succ-size.
+#
--- |
@computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
declare i64 @f0()
@@ -30,54 +32,54 @@ tracksRegLiveness: true
body: |
; CHECK-LABEL: name: computed_goto
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64_nosp = COPY [[COPY2]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY4]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY6]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr64_nosp = COPY [[COPY8]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY8]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr64_nosp = PHI [[COPY]], %bb.0, [[COPY4]], %bb.4, [[COPY3]], %bb.3, [[COPY2]], %bb.2, [[COPY1]], %bb.1
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[PHI]], @computed_goto.dispatch, $noreg
bb.0:
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll
index b5f3e78..f21c075 100644
--- a/llvm/test/CodeGen/X86/load-combine.ll
+++ b/llvm/test/CodeGen/X86/load-combine.ll
@@ -800,13 +800,13 @@ define void @shift_i32_by_32(ptr %src1, ptr %src2, ptr %dst) {
; CHECK-LABEL: shift_i32_by_32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl $-1, 4(%eax)
-; CHECK-NEXT: movl $-1, (%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
; CHECK-NEXT: retl
;
; CHECK64-LABEL: shift_i32_by_32:
; CHECK64: # %bb.0: # %entry
-; CHECK64-NEXT: movq $-1, (%rdx)
+; CHECK64-NEXT: movq $0, (%rdx)
; CHECK64-NEXT: retq
entry:
%load1 = load i8, ptr %src1, align 1
diff --git a/llvm/test/CodeGen/X86/pr33960.ll b/llvm/test/CodeGen/X86/pr33960.ll
index 44fe777..6ee270e 100644
--- a/llvm/test/CodeGen/X86/pr33960.ll
+++ b/llvm/test/CodeGen/X86/pr33960.ll
@@ -7,12 +7,10 @@
define void @PR33960() {
; X86-LABEL: PR33960:
; X86: # %bb.0: # %entry
-; X86-NEXT: movl $-1, b
; X86-NEXT: retl
;
; X64-LABEL: PR33960:
; X64: # %bb.0: # %entry
-; X64-NEXT: movl $-1, b(%rip)
; X64-NEXT: retq
entry:
%tmp = insertelement <4 x i32> <i32 undef, i32 -7, i32 -3, i32 undef>, i32 -2, i32 3
diff --git a/llvm/test/CodeGen/X86/pr76416.ll b/llvm/test/CodeGen/X86/pr76416.ll
new file mode 100644
index 0000000..68e9ef9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr76416.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;
+; Reproducer from https://github.com/llvm/llvm-project/issues/76416
+;
+
+@load_p = external global ptr, align 8
+@load_data = external global i8, align 1
+
+define dso_local void @pr76416() {
+; CHECK-LABEL: pr76416:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jg .LBB0_3
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_2: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: incl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: .LBB0_3: # %for.end
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq load_p@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movq load_data@GOTPCREL(%rip), %rcx
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_4: # %for.cond1
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movq (%rax), %rdx
+; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: movzbl (%rdx,%rsi), %edx
+; CHECK-NEXT: movb %dl, (%rcx)
+; CHECK-NEXT: leal 1(%rsi), %edx
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_4
+entry:
+ %alloca = alloca i32, align 4
+ store i32 0, ptr %alloca, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %load.from.alloca.0 = load i32, ptr %alloca, align 4
+ %cmp = icmp slt i32 %load.from.alloca.0, 4
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void asm sideeffect "", "{ax},~{dirflag},~{fpsr},~{flags}"(i8 0) nounwind
+ %load.from.alloca.1 = load i32, ptr %alloca, align 4
+ %inc = add nsw i32 %load.from.alloca.1, 1
+ store i32 %inc, ptr %alloca, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ store i32 0, ptr %alloca, align 4
+ br label %for.cond1
+
+for.cond1: ; preds = %for.cond1, %for.end
+ call void asm sideeffect "", "N{dx},~{dirflag},~{fpsr},~{flags}"(i32 poison) nounwind
+ %load.from.load_p = load ptr, ptr @load_p, align 8
+ %regs = getelementptr inbounds { [4 x i8] }, ptr %load.from.load_p, i32 0, i32 0
+ %load.from.alloca.2 = load i32, ptr %alloca, align 4
+ %idxprom = sext i32 %load.from.alloca.2 to i64
+ %arrayidx = getelementptr inbounds [4 x i8], ptr %regs, i64 0, i64 %idxprom
+ %load.with.gep.ptr = load i8, ptr %arrayidx, align 1
+ store i8 %load.with.gep.ptr, ptr @load_data, align 1
+ %load.from.alloca.3 = load i32, ptr %alloca, align 4
+ %inc2 = add nsw i32 %load.from.alloca.3, 1
+ store i32 %inc2, ptr %alloca, align 4
+ br label %for.cond1
+}
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index f4f3ae4..772e776 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -6,6 +6,7 @@
; RUN: llc -mtriple=amd64-pc-openbsd < %s -o - | FileCheck --check-prefix=OPENBSD-AMD64 %s
; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-I386 %s
; RUN: llc -mtriple=x86_64-w64-mingw32 < %s -o - | FileCheck --check-prefix=MINGW-X64 %s
+; RUN: llc -mtriple=x86_64-pc-cygwin < %s -o - | FileCheck --check-prefix=MINGW-X64 %s
; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=IGNORE_INTRIN %s
%struct.foo = type { [16 x i8] }
diff --git a/llvm/test/CodeGen/X86/subreg-fail.mir b/llvm/test/CodeGen/X86/subreg-fail.mir
index c8146f0..dc69071 100644
--- a/llvm/test/CodeGen/X86/subreg-fail.mir
+++ b/llvm/test/CodeGen/X86/subreg-fail.mir
@@ -14,8 +14,8 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: test1
- ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`)
- ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`)
+ ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm]] :: (volatile load (s32) from `ptr undef`)
+ ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm1]] :: (volatile load (s32) from `ptr undef`)
; CHECK-NEXT: [[MOV32rm1:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32rm1]], 32, implicit-def dead $eflags
; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = LEA64r [[MOV32rm1]], 1, [[MOV32rm]], 256, $noreg
; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = SHR64ri [[LEA64r]], 8, implicit-def dead $eflags
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir b/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir
new file mode 100644
index 0000000..e4fb812
--- /dev/null
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir
@@ -0,0 +1,451 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines
+
+# We cannot lose the liveness of the high subregister of %1 when
+# coalesced with %0, so introduce an implicit-def of the super
+# register on the MOV.
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: subreg_to_reg_folds_to_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $rax
+
+ ; CHECK-LABEL: name: subreg_to_reg_folds_to_undef
+ ; CHECK: liveins: $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rax
+ ; CHECK-NEXT: undef [[MOV32rr:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rr [[COPY]].sub_32bit, implicit-def [[MOV32rr]]
+ ; CHECK-NEXT: RET 0, implicit [[MOV32rr]]
+ %0:gr64 = COPY killed $rax
+ %1:gr32 = COPY killed %0.sub_32bit
+ %2:gr32 = MOV32rr killed %1
+ %3:gr64 = SUBREG_TO_REG 0, killed %2, %subreg.sub_32bit
+ %4:gr64 = COPY killed %3
+ RET 0, implicit %4
+
+...
+
+---
+name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def %0
+ %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def undef [[MOV32r0_]].sub_8bit, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit [[MOV32r0_]]
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags, implicit-def undef %0.sub_8bit
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ INLINEASM &"", 0, implicit %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+
+# Reduced realistic case which was asserting after introducing new implicit-defs
+---
+name: coalesce_needs_implicit_defs
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_needs_implicit_defs
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $rdi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: undef [[MOV32r0_1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_1]], implicit-def [[MOV32r0_1]]
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[MOV32r0_2:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: TEST64rr [[MOV32r0_1]], [[MOV32r0_1]], implicit-def $eflags
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = SETCCr 4, implicit killed $eflags
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32r0_2]], 4, implicit-def dead $eflags
+ ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = ADD64rr [[MOV32r0_2]], [[COPY]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[MOV32r0_1:%[0-9]+]]:gr64_with_sub_8bit = COPY [[MOV32r0_2]]
+ ; CHECK-NEXT: JMP_1 %bb.1
+ bb.0:
+ liveins: $rdi
+
+ %0:gr64 = COPY killed $rdi
+ %1:gr32 = MOV32r0 implicit-def dead $eflags
+ %2:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit
+ %3:gr64 = COPY killed %2
+
+ bb.1:
+ %4:gr64 = COPY killed %3
+ %5:gr32 = MOV32r0 implicit-def dead $eflags
+ TEST64rr killed %4, %4, implicit-def $eflags
+ %6:gr8 = SETCCr 4, implicit killed $eflags
+ %7:gr32 = COPY killed %5
+ %7.sub_8bit:gr32 = COPY killed %6
+ %8:gr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32bit
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %9:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit
+ $rdi = COPY %9
+ CALL64r killed %9, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %10:gr64 = COPY killed %8
+ %10:gr64 = SHL64ri %10, 4, implicit-def dead $eflags
+ %11:gr64 = COPY killed %10
+ %11:gr64 = ADD64rr %11, %0, implicit-def dead $eflags
+ %3:gr64 = COPY killed %11
+ JMP_1 %bb.1
+
+...
+
+# Make sure to add the 'undef' flag to the result register %2,
+# because the top 32 bits are not defined.
+---
+name: coalesce_add_implicitdef_and_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_add_implicitdef_and_undef
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $eflags, $edx
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = ADD32ri [[COPY]].sub_32bit, -34, implicit-def $eflags, implicit-def [[COPY]]
+ ; CHECK-NEXT: FAKE_USE [[COPY]]
+ ; CHECK-NEXT: RET 0
+ bb.0:
+ liveins: $eflags, $edx
+ %0:gr32 = COPY $edx
+ JMP_1 %bb.1
+
+ bb.1:
+ %1:gr32 = COPY %0
+ %1:gr32 = ADD32ri %1, -34, implicit-def $eflags
+ %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %1, %subreg.sub_32bit
+ FAKE_USE %2
+ RET 0
+...
+
+# We can't mark the destination register as 'undef' or add implicit-def
+# because the top 24 bits of %0:gr32 are retained by the SUBREG_TO_REG.
+#
+# For example, if this were to result in:
+#
+# undef %2.sub_32bit:gr64_with_sub_8bit = COPY $edx
+# %1:gr8 = SETCCr 4, implicit $eflags
+# JMP_1 %bb.1
+#
+# bb.1:
+# undef %2.sub_8bit:gr64_with_sub_8bit = COPY %1, implicit-def %2
+#
+# Then this says that the top 56 bits of %2 are undef. That's not correct
+# because only the top 32 bits are undef.
+---
+name: coalesce_dont_add_implicitdef_or_undef
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_dont_add_implicitdef_or_undef
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $eflags, $edx
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx
+ ; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = COPY [[SETCCr]]
+ ; CHECK-NEXT: FAKE_USE [[COPY]]
+ ; CHECK-NEXT: RET 0
+ bb.0:
+ liveins: $eflags, $edx
+ %0:gr32 = COPY $edx
+ %1:gr8 = SETCCr 4, implicit killed $eflags
+ JMP_1 %bb.1
+
+ bb.1:
+ %0.sub_8bit:gr32 = COPY %1
+ %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ FAKE_USE %2
+ RET 0
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi
+ ; CHECK-NEXT: CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ $rdi = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, $eax, %subreg.sub_32bit
+ ; CHECK-NEXT: $rdi = COPY [[SUBREG_TO_REG]]
+ ; CHECK-NEXT: CALL64r [[SUBREG_TO_REG]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ $eax = MOV32r0 implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed $eax, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+# Coalesced instruction is a copy with other implicit operands
+---
+name: coalesce_copy_into_subreg_to_reg64
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+ ; CHECK-LABEL: name: coalesce_copy_into_subreg_to_reg64
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $eax, implicit-def dead $eflags, implicit-def [[COPY]]
+ ; CHECK-NEXT: $rdi = COPY [[COPY]]
+ ; CHECK-NEXT: CALL64r [[COPY]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = COPY $eax, implicit-def dead $eflags
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value
+ ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef [[MOV32r0_]].sub_32bit, implicit [[MOV32r0_]].sub_32bit, implicit-def [[MOV32r0_]]
+ ; CHECK-NEXT: $rdi = COPY [[MOV32r0_]]
+ ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ %0:gr32 = MOV32r0 implicit-def dead $eflags
+ INLINEASM &"", 0, implicit-def %0, implicit %0
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1
+ ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags
+ ; CHECK-NEXT: JMP_1 %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $rdi = COPY %1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: RET 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ bb.0:
+ INLINEASM &"", 0, implicit-def %0:gr32
+ JCC_1 %bb.1, 4, implicit undef $eflags
+ JMP_1 %bb.2
+
+ bb.1:
+ %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ RET 0
+
+ bb.2:
+
+...
+
+---
+name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def
+frameInfo:
+ adjustsStack: true
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1
+ ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags
+ ; CHECK-NEXT: JMP_1 %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $rdi = COPY %1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ ; CHECK-NEXT: JMP_1 %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ bb.0:
+
+ INLINEASM &"", 0, implicit-def %0:gr32
+ JCC_1 %bb.1, 4, implicit undef $eflags
+ JMP_1 %bb.2
+
+ bb.1:
+ %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit
+ $rdi = COPY %1
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+ JMP_1 %bb.1
+
+ bb.2:
+
+...
diff --git a/llvm/test/CodeGen/X86/win32-ssp.ll b/llvm/test/CodeGen/X86/win32-ssp.ll
index 536a6d5..259f039 100644
--- a/llvm/test/CodeGen/X86/win32-ssp.ll
+++ b/llvm/test/CodeGen/X86/win32-ssp.ll
@@ -1,7 +1,9 @@
; RUN: llc -mtriple=x86_64-w64-mingw32 < %s -o - | FileCheck --check-prefix=MINGW %s
+; RUN: llc -mtriple=x86_64-pc-cygwin < %s -o - | FileCheck --check-prefix=MINGW %s
; RUN: llc -mtriple=x86_64-pc-windows-itanium < %s -o - | FileCheck --check-prefix=MSVC %s
; RUN: llc -mtriple=x86_64-pc-windows-msvc < %s -o - | FileCheck --check-prefix=MSVC %s
; RUN: llc -mtriple=i686-w64-mingw32 < %s -o - | FileCheck --check-prefix=MINGW %s
+; RUN: llc -mtriple=i686-pc-cygwin < %s -o - | FileCheck --check-prefix=MINGW %s
declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
declare dso_local void @other(ptr)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
index ca1153a..bdada7d 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
@@ -1,12 +1,5 @@
; RUN: opt < %s -S -passes=hwasan -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
-
; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\10" }, align 16
; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 268435460 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -17,14 +10,21 @@
; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 318767088 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 301989920 }, section "hwasan_globals", !associated [[HUGE]]
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 2305843009213693952) to ptr)
; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 2449958197289549824) to ptr)
; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 2594073385365405696) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK: [[FOUR]] = !{ptr @four.hwasan}
; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
index f5ae1c0..4c28523 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
@@ -1,16 +1,11 @@
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29 %s
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30 %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=riscv64-unknown-elf -hwasan-globals=1 -hwasan-all-globals=1 | FileCheck --check-prefixes=CHECK,CHECK30,ALLGLOBALS %s
; CHECK29: @four = global
; CHECK: @specialcaselisted = global i16 2, no_sanitize_hwaddress
-; CHECK: @insection = global i16 2, section "custom"
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+; NOALLGLOBALS: @insection = global i16 2, section "custom"
; CHECK30: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\AC" }, align 16
; CHECK30: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 -1409286140 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -22,14 +17,23 @@
; CHECK30: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 -1358954512 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK30: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 -1375731680 }, section "hwasan_globals", !associated [[HUGE]]
+; ALLGLOBALS: @insection.hwasan = private global { i16, [14 x i8] } { i16 2, [14 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\AF" }, section "custom", align 16
+
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK30: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 -6052837899185946624) to ptr)
; CHECK30: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 -5980780305148018688) to ptr)
; CHECK30: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 -5908722711110090752) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK30: [[FOUR]] = !{ptr @four.hwasan}
; CHECK30: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK30: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
index e5e4371..43da02d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
@@ -7,9 +7,6 @@
; - llvm.x86.vgf2p8affineinvqb.128
; - llvm.x86.vgf2p8affineinvqb.256
; - llvm.x86.vgf2p8affineinvqb.512
-; - llvm.x86.vgf2p8affineqb.128
-; - llvm.x86.vgf2p8affineqb.256
-; - llvm.x86.vgf2p8affineqb.512
;
; Heuristically handled:
; - llvm.x86.vgf2p8mulb.128
@@ -254,53 +251,42 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineqb_128(<16 x i8> %s
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP8]], 0
-; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
-; CHECK: 9:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC1:%.*]], <16 x i8> [[SRC2:%.*]], i8 3)
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i128 [[TMP12]], 0
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSCMP3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF1]]
-; CHECK: 14:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 15:
+; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC2:%.*]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP9:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[SRC1:%.*]], i8 0)
+; CHECK-NEXT: [[TMP10:%.*]] = or <16 x i8> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i8> [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i8> [[TMP13]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC1]], <16 x i8> [[SRC2]], i8 3)
+; CHECK-NEXT: [[TMP14:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP15:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC2]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP20:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i8> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i8> [[TMP17]], [[TMP20]]
+; CHECK-NEXT: [[TMP19:%.*]] = or <16 x i8> [[TMP18]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC1]], <16 x i8> [[SRC2]], i8 4)
-; CHECK-NEXT: [[TMP17:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT: [[_MSCMP6:%.*]] = icmp ne i128 [[TMP17]], 0
-; CHECK-NEXT: [[TMP18:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP7:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT: [[_MSOR8:%.*]] = or i1 [[_MSCMP6]], [[_MSCMP7]]
-; CHECK-NEXT: br i1 [[_MSOR8]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF1]]
-; CHECK: 19:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 20:
+; CHECK-NEXT: [[TMP37:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP39:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC2]], <16 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP40:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[TMP3]], <16 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP41:%.*]] = or <16 x i8> [[TMP37]], [[TMP39]]
+; CHECK-NEXT: [[TMP42:%.*]] = or <16 x i8> [[TMP41]], [[TMP40]]
+; CHECK-NEXT: [[TMP43:%.*]] = or <16 x i8> [[TMP42]], zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> [[SRC1]], <16 x i8> [[SRC2]], i8 5)
-; CHECK-NEXT: [[TMP22:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> zeroinitializer, <16 x i8> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> [[TMP19]], <16 x i8> zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = xor <16 x i8> [[TMP16]], zeroinitializer
-; CHECK-NEXT: [[TMP24:%.*]] = or <16 x i8> [[TMP23]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <16 x i8> [[TMP23]], [[TMP19]]
; CHECK-NEXT: [[TMP25:%.*]] = or <16 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i8> [[TMP25]], <16 x i8> [[TMP22]]
; CHECK-NEXT: [[TMP26:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> [[TMP16]], <16 x i8> zeroinitializer
-; CHECK-NEXT: [[TMP27:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> zeroinitializer, <16 x i8> [[TMP4]]
+; CHECK-NEXT: [[TMP27:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> [[TMP43]], <16 x i8> [[TMP4]]
; CHECK-NEXT: [[TMP28:%.*]] = xor <16 x i8> [[TMP21]], [[PASSTHRU:%.*]]
-; CHECK-NEXT: [[TMP29:%.*]] = or <16 x i8> [[TMP28]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = or <16 x i8> [[TMP28]], [[TMP43]]
; CHECK-NEXT: [[TMP30:%.*]] = or <16 x i8> [[TMP29]], [[TMP4]]
; CHECK-NEXT: [[_MSPROP_SELECT1:%.*]] = select <16 x i1> [[TMP5]], <16 x i8> [[TMP30]], <16 x i8> [[TMP27]]
; CHECK-NEXT: [[TMP31:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> [[TMP21]], <16 x i8> [[PASSTHRU]]
+; CHECK-NEXT: [[TMP38:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } { <16 x i8> splat (i8 -1), <16 x i8> splat (i8 -1), <16 x i8> splat (i8 -1) }, <16 x i8> [[TMP12]], 0
; CHECK-NEXT: [[TMP32:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } poison, <16 x i8> [[TMP11]], 0
-; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } { <16 x i8> zeroinitializer, <16 x i8> splat (i8 -1), <16 x i8> splat (i8 -1) }, <16 x i8> [[_MSPROP_SELECT]], 1
+; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } [[TMP38]], <16 x i8> [[_MSPROP_SELECT]], 1
; CHECK-NEXT: [[TMP34:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } [[TMP32]], <16 x i8> [[TMP26]], 1
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } [[TMP33]], <16 x i8> [[_MSPROP_SELECT1]], 2
; CHECK-NEXT: [[TMP36:%.*]] = insertvalue { <16 x i8>, <16 x i8>, <16 x i8> } [[TMP34]], <16 x i8> [[TMP31]], 2
@@ -329,53 +315,42 @@ define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineqb_256(<32 x i8> %s
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <32 x i8> [[TMP3]] to i256
-; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP8]], 0
-; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
-; CHECK: 9:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC1:%.*]], <32 x i8> [[SRC2:%.*]], i8 3)
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i256 [[TMP12]], 0
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <32 x i8> [[TMP3]] to i256
-; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP13]], 0
-; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSCMP3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF1]]
-; CHECK: 14:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 15:
+; CHECK-NEXT: [[TMP7:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP8:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC2:%.*]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP9:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[SRC1:%.*]], i8 0)
+; CHECK-NEXT: [[TMP10:%.*]] = or <32 x i8> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i8> [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i8> [[TMP13]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC1]], <32 x i8> [[SRC2]], i8 3)
+; CHECK-NEXT: [[TMP14:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP15:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC2]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP20:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i8> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i8> [[TMP17]], [[TMP20]]
+; CHECK-NEXT: [[TMP19:%.*]] = or <32 x i8> [[TMP18]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC1]], <32 x i8> [[SRC2]], i8 4)
-; CHECK-NEXT: [[TMP17:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP6:%.*]] = icmp ne i256 [[TMP17]], 0
-; CHECK-NEXT: [[TMP18:%.*]] = bitcast <32 x i8> [[TMP3]] to i256
-; CHECK-NEXT: [[_MSCMP7:%.*]] = icmp ne i256 [[TMP18]], 0
-; CHECK-NEXT: [[_MSOR8:%.*]] = or i1 [[_MSCMP6]], [[_MSCMP7]]
-; CHECK-NEXT: br i1 [[_MSOR8]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF1]]
-; CHECK: 19:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 20:
+; CHECK-NEXT: [[TMP37:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP39:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC2]], <32 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP40:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[TMP3]], <32 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP41:%.*]] = or <32 x i8> [[TMP37]], [[TMP39]]
+; CHECK-NEXT: [[TMP42:%.*]] = or <32 x i8> [[TMP41]], [[TMP40]]
+; CHECK-NEXT: [[TMP43:%.*]] = or <32 x i8> [[TMP42]], zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> [[SRC1]], <32 x i8> [[SRC2]], i8 5)
-; CHECK-NEXT: [[TMP22:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> zeroinitializer, <32 x i8> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> [[TMP19]], <32 x i8> zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = xor <32 x i8> [[TMP16]], zeroinitializer
-; CHECK-NEXT: [[TMP24:%.*]] = or <32 x i8> [[TMP23]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <32 x i8> [[TMP23]], [[TMP19]]
; CHECK-NEXT: [[TMP25:%.*]] = or <32 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP25]], <32 x i8> [[TMP22]]
; CHECK-NEXT: [[TMP26:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> [[TMP16]], <32 x i8> zeroinitializer
-; CHECK-NEXT: [[TMP27:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> zeroinitializer, <32 x i8> [[TMP4]]
+; CHECK-NEXT: [[TMP27:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> [[TMP43]], <32 x i8> [[TMP4]]
; CHECK-NEXT: [[TMP28:%.*]] = xor <32 x i8> [[TMP21]], [[PASSTHRU:%.*]]
-; CHECK-NEXT: [[TMP29:%.*]] = or <32 x i8> [[TMP28]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = or <32 x i8> [[TMP28]], [[TMP43]]
; CHECK-NEXT: [[TMP30:%.*]] = or <32 x i8> [[TMP29]], [[TMP4]]
; CHECK-NEXT: [[_MSPROP_SELECT1:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP30]], <32 x i8> [[TMP27]]
; CHECK-NEXT: [[TMP31:%.*]] = select <32 x i1> [[TMP6]], <32 x i8> [[TMP21]], <32 x i8> [[PASSTHRU]]
+; CHECK-NEXT: [[TMP38:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } { <32 x i8> splat (i8 -1), <32 x i8> splat (i8 -1), <32 x i8> splat (i8 -1) }, <32 x i8> [[TMP12]], 0
; CHECK-NEXT: [[TMP32:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } poison, <32 x i8> [[TMP11]], 0
-; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } { <32 x i8> zeroinitializer, <32 x i8> splat (i8 -1), <32 x i8> splat (i8 -1) }, <32 x i8> [[_MSPROP_SELECT]], 1
+; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } [[TMP38]], <32 x i8> [[_MSPROP_SELECT]], 1
; CHECK-NEXT: [[TMP34:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } [[TMP32]], <32 x i8> [[TMP26]], 1
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } [[TMP33]], <32 x i8> [[_MSPROP_SELECT1]], 2
; CHECK-NEXT: [[TMP36:%.*]] = insertvalue { <32 x i8>, <32 x i8>, <32 x i8> } [[TMP34]], <32 x i8> [[TMP31]], 2
@@ -404,53 +379,42 @@ define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineqb_512(<64 x i8> %s
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = bitcast <64 x i8> [[TMP2]] to i512
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <64 x i8> [[TMP3]] to i512
-; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i512 [[TMP8]], 0
-; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
-; CHECK: 9:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC1:%.*]], <64 x i8> [[SRC2:%.*]], i8 3)
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <64 x i8> [[TMP2]] to i512
-; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i512 [[TMP12]], 0
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <64 x i8> [[TMP3]] to i512
-; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i512 [[TMP13]], 0
-; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSCMP3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF1]]
-; CHECK: 14:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 15:
+; CHECK-NEXT: [[TMP7:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP8:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC2:%.*]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP9:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[SRC1:%.*]], i8 0)
+; CHECK-NEXT: [[TMP10:%.*]] = or <64 x i8> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i8> [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i8> [[TMP13]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC1]], <64 x i8> [[SRC2]], i8 3)
+; CHECK-NEXT: [[TMP14:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP15:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC2]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP20:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i8> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <64 x i8> [[TMP17]], [[TMP20]]
+; CHECK-NEXT: [[TMP19:%.*]] = or <64 x i8> [[TMP18]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC1]], <64 x i8> [[SRC2]], i8 4)
-; CHECK-NEXT: [[TMP17:%.*]] = bitcast <64 x i8> [[TMP2]] to i512
-; CHECK-NEXT: [[_MSCMP6:%.*]] = icmp ne i512 [[TMP17]], 0
-; CHECK-NEXT: [[TMP18:%.*]] = bitcast <64 x i8> [[TMP3]] to i512
-; CHECK-NEXT: [[_MSCMP7:%.*]] = icmp ne i512 [[TMP18]], 0
-; CHECK-NEXT: [[_MSOR8:%.*]] = or i1 [[_MSCMP6]], [[_MSCMP7]]
-; CHECK-NEXT: br i1 [[_MSOR8]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF1]]
-; CHECK: 19:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: 20:
+; CHECK-NEXT: [[TMP37:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP39:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC2]], <64 x i8> [[TMP2]], i8 0)
+; CHECK-NEXT: [[TMP40:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[TMP3]], <64 x i8> [[SRC1]], i8 0)
+; CHECK-NEXT: [[TMP41:%.*]] = or <64 x i8> [[TMP37]], [[TMP39]]
+; CHECK-NEXT: [[TMP42:%.*]] = or <64 x i8> [[TMP41]], [[TMP40]]
+; CHECK-NEXT: [[TMP43:%.*]] = or <64 x i8> [[TMP42]], zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> [[SRC1]], <64 x i8> [[SRC2]], i8 5)
-; CHECK-NEXT: [[TMP22:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> zeroinitializer, <64 x i8> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP19]], <64 x i8> zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = xor <64 x i8> [[TMP16]], zeroinitializer
-; CHECK-NEXT: [[TMP24:%.*]] = or <64 x i8> [[TMP23]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <64 x i8> [[TMP23]], [[TMP19]]
; CHECK-NEXT: [[TMP25:%.*]] = or <64 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <64 x i1> [[TMP5]], <64 x i8> [[TMP25]], <64 x i8> [[TMP22]]
; CHECK-NEXT: [[TMP26:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP16]], <64 x i8> zeroinitializer
-; CHECK-NEXT: [[TMP27:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> zeroinitializer, <64 x i8> [[TMP4]]
+; CHECK-NEXT: [[TMP27:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP43]], <64 x i8> [[TMP4]]
; CHECK-NEXT: [[TMP28:%.*]] = xor <64 x i8> [[TMP21]], [[PASSTHRU:%.*]]
-; CHECK-NEXT: [[TMP29:%.*]] = or <64 x i8> [[TMP28]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = or <64 x i8> [[TMP28]], [[TMP43]]
; CHECK-NEXT: [[TMP30:%.*]] = or <64 x i8> [[TMP29]], [[TMP4]]
; CHECK-NEXT: [[_MSPROP_SELECT1:%.*]] = select <64 x i1> [[TMP5]], <64 x i8> [[TMP30]], <64 x i8> [[TMP27]]
; CHECK-NEXT: [[TMP31:%.*]] = select <64 x i1> [[TMP6]], <64 x i8> [[TMP21]], <64 x i8> [[PASSTHRU]]
+; CHECK-NEXT: [[TMP38:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } { <64 x i8> splat (i8 -1), <64 x i8> splat (i8 -1), <64 x i8> splat (i8 -1) }, <64 x i8> [[TMP12]], 0
; CHECK-NEXT: [[TMP32:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } poison, <64 x i8> [[TMP11]], 0
-; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } { <64 x i8> zeroinitializer, <64 x i8> splat (i8 -1), <64 x i8> splat (i8 -1) }, <64 x i8> [[_MSPROP_SELECT]], 1
+; CHECK-NEXT: [[TMP33:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } [[TMP38]], <64 x i8> [[_MSPROP_SELECT]], 1
; CHECK-NEXT: [[TMP34:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } [[TMP32]], <64 x i8> [[TMP26]], 1
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } [[TMP33]], <64 x i8> [[_MSPROP_SELECT1]], 2
; CHECK-NEXT: [[TMP36:%.*]] = insertvalue { <64 x i8>, <64 x i8>, <64 x i8> } [[TMP34]], <64 x i8> [[TMP31]], 2
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
index bc0f586..f586e4a 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
@@ -173,6 +173,21 @@ v_mul_f16_e32 v5, v1, v255
v_mul_f16_e32 v5, v255, v2
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
v_sub_f16_dpp v255, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
index 98436c9..f1641fc 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
@@ -24,3 +24,81 @@ ds_atomic_barrier_arrive_rtn_b64 v[2:3], v2, v[4:5] offset:513
ds_atomic_barrier_arrive_rtn_b64 v[254:255], v2, v[4:5] offset:65535
// GFX1250: ds_atomic_barrier_arrive_rtn_b64 v[254:255], v2, v[4:5] offset:65535 ; encoding: [0xff,0xff,0xd4,0xd9,0x02,0x04,0x00,0xfe]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_add_f64 v1, v[2:3] offset:65535
+// GFX1250: ds_add_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0x01,0x02,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v1, v[2:3] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_f64 v255, v[2:3] offset:65535
+// GFX1250: ds_add_f64 v255, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0xff,0x02,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v255, v[2:3] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_f64 v1, v[254:255] offset:65535
+// GFX1250: ds_add_f64 v1, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0x01,0xfe,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v1, v[254:255] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_f64 v1, v[2:3]
+// GFX1250: ds_add_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x50,0xd9,0x01,0x02,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v1, v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_f64 v1, v[2:3]
+// GFX1250: ds_add_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x50,0xd9,0x01,0x02,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v1, v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_f64 v1, v[2:3] offset:4
+// GFX1250: ds_add_f64 v1, v[2:3] offset:4 ; encoding: [0x04,0x00,0x50,0xd9,0x01,0x02,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_f64 v1, v[2:3] offset:4
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:65535
+// GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// GFX1250: ds_add_rtn_f64 v[254:255], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0xfe]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v255, v[2:3] offset:65535
+// GFX1250: ds_add_rtn_f64 v[4:5], v255, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0xff,0x02,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v255, v[2:3] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v1, v[254:255] offset:65535
+// GFX1250: ds_add_rtn_f64 v[4:5], v1, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0xfe,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v1, v[254:255] offset:65535
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v1, v[2:3]
+// GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] ; encoding: [0x00,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v1, v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v1, v[2:3]
+// GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] ; encoding: [0x00,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v1, v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:4
+// GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:4 ; encoding: [0x04,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:4
+// GFX12-ERR-NEXT:{{^}}^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
index 1d14bd9..7a4da25 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
@@ -18,3 +18,303 @@ buffer_atomic_and_b32 v5, v1, s[8:11], s3 offen offset:4095 nv
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: nv is not supported on this GPU
// GFX12-ERR-NEXT:{{^}}buffer_atomic_and_b32 v5, v1, s[8:11], s3 offen offset:4095 nv
// GFX12-ERR-NEXT:{{^}} ^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT:{{^}}buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX1250: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_min_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_min_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[12:15], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[96:99], s3 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], s101 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], m0 offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], v0, s[8:11], s3 offen offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], s3
+// GFX12-ERR-NEXT:{{^}}^
+
+buffer_atomic_max_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: buffer_atomic_max_f64 v[4:5], off, s[8:11], s3 offset:7
+// GFX12-ERR-NEXT:{{^}}^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s
index c5288a7..48ddfec 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s
@@ -1,6 +1,2817 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+flat_atomic_add_f32 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_f32 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0xee,0x85]
+
+flat_atomic_add_f32 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_f32 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0x12,0x7a]
+
+flat_atomic_add_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_add_f32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_add_f32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_f32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_add_f32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_add_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_add_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_add_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_add_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_add_u32 v[4:5], v5
+// GFX1250: flat_atomic_add_u32 v[4:5], v5 ; encoding: [0x7c,0x40,0x0d,0xec,0x00,0x00,0x80,0x02,0x04,0x00,0x00,0x00]
+
+flat_atomic_add_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_add_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0xc0,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_add_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0xc0,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_add_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_add_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_and_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_and_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_and_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_and_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_and_b32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_and_b32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_and_b32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_and_b32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_and_b64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_and_b64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x40,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_and_b64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_and_b64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x40,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_and_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_and_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_and_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_and_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x10,0x02,0x02,0x00,0x00,0x00]
+
+flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x10,0x02,0x02,0x00,0x00,0x00]
+
+flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x10,0x02,0x02,0xff,0x07,0x00]
+
+flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v0, v[2:3], v[4:5] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x10,0x02,0x02,0xff,0x07,0x00]
+
+flat_atomic_cmpswap_b32 v1, v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v1, v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cmpswap_b32 v1, v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b32 v1, v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x00,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cmpswap_b64 v[0:1], v[2:5] offset:-64
+// GFX1250: flat_atomic_cmpswap_b64 v[0:1], v[2:5] offset:-64 ; encoding: [0x7c,0x80,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cmpswap_b64 v[0:1], v[2:5] offset:64
+// GFX1250: flat_atomic_cmpswap_b64 v[0:1], v[2:5] offset:64 ; encoding: [0x7c,0x80,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+flat_atomic_cond_sub_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cond_sub_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cond_sub_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_cond_sub_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_cond_sub_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_cond_sub_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_cond_sub_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_cond_sub_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_dec_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_dec_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x10,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_dec_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_dec_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x10,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_dec_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_dec_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_dec_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_dec_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_dec_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_dec_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x40,0x13,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_dec_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_dec_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x40,0x13,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_dec_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_dec_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x13,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_dec_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_dec_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x13,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_inc_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_inc_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_inc_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_inc_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_inc_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_inc_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0xc0,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_inc_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_inc_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0xc0,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_inc_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_inc_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x00,0x13,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_inc_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_inc_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x00,0x13,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_inc_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_inc_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x13,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_inc_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_inc_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x13,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_num_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_num_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_num_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_num_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_num_f32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_max_num_f32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_num_f32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_max_num_f32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_i32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_i32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_i32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_i32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_i32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_max_i32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_i32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_max_i32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_i64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_max_i64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0xc0,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_i64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_max_i64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0xc0,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_i64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_i64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_i64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_i64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_max_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0xc0,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_max_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0xc0,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_max_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x00,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_max_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x00,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_max_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_max_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_max_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_num_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_num_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_num_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_num_f32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x14,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_num_f32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_min_num_f32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_num_f32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_min_num_f32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x14,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_i32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_i32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_i32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_i32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_i32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_min_i32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_i32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_min_i32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_i64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_min_i64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x40,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_i64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_min_i64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x40,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_i64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_i64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_i64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_i64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0e,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_min_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_min_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x0e,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_min_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x80,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_min_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x80,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_min_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_min_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_min_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_or_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_or_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_or_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_or_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_or_b32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_or_b32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_or_b32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_or_b32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_or_b64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_or_b64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x80,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_or_b64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_or_b64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x80,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_or_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_or_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_or_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_or_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_clamp_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_clamp_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_sub_clamp_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_clamp_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_clamp_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_sub_clamp_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0xc0,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_u32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_sub_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_u32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0d,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_u32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_sub_u32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_sub_u32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_sub_u32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x0d,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_u64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_sub_u64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x00,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_sub_u64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_sub_u64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x00,0x11,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_sub_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_u64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_sub_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_sub_u64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x11,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0x00,0x00,0x00]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0x00,0x00,0x00]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0xff,0x07,0x00]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0xff,0x07,0x00]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 offset:-2048 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 offset:-2048 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0x00,0xf8,0xff]
+
+flat_atomic_swap_b32 v0, v[2:3], v3 offset:2048 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v0, v[2:3], v3 offset:2048 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x90,0x01,0x02,0x00,0x08,0x00]
+
+flat_atomic_swap_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_swap_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_swap_b32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_swap_b32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_swap_b32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_swap_b32 v[0:1], v2 offset:64 ; encoding: [0x7c,0xc0,0x0c,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_swap_b64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_swap_b64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0x40,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_swap_b64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_swap_b64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0x40,0x10,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_swap_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_swap_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_swap_b64 v[2:3], v[4:5], v[6:7] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xec,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+flat_atomic_xor_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_xor_b32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_xor_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_xor_b32 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0f,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_xor_b32 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_xor_b32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_xor_b32 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_xor_b32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x0f,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_xor_b64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_atomic_xor_b64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0xc0,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_xor_b64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_atomic_xor_b64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0xc0,0x12,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_xor_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_xor_b64 v[2:3], v[0:1], v[2:3] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_xor_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_xor_b64 v[2:3], v[0:1], v[2:3] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x12,0xec,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0xee,0x85]
+
+flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0x12,0x7a]
+
+flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_f16 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_pk_add_f16 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_pk_add_f16 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_pk_add_f16 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_pk_add_f16 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_pk_add_f16 v[0:1], v2
+// GFX1250: flat_atomic_pk_add_f16 v[0:1], v2 ; encoding: [0x7c,0x40,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+
+flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:-8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0xee,0x85]
+
+flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:8000000 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x00,0x12,0x7a]
+
+flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_pk_add_bf16 v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xec,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_pk_add_bf16 v[0:1], v2 offset:-64
+// GFX1250: flat_atomic_pk_add_bf16 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_atomic_pk_add_bf16 v[0:1], v2 offset:64
+// GFX1250: flat_atomic_pk_add_bf16 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_atomic_pk_add_bf16 v[0:1], v2
+// GFX1250: flat_atomic_pk_add_bf16 v[0:1], v2 ; encoding: [0x7c,0x80,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+
+flat_load_b128 v[2:5], v[0:1] offset:-64
+// GFX1250: flat_load_b128 v[2:5], v[0:1] offset:-64 ; encoding: [0x7c,0xc0,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_b128 v[2:5], v[0:1] offset:64
+// GFX1250: flat_load_b128 v[2:5], v[0:1] offset:64 ; encoding: [0x7c,0xc0,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_b128 v[2:5], v[6:7]
+// GFX1250: flat_load_b128 v[2:5], v[6:7] ; encoding: [0x7c,0xc0,0x05,0xec,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+flat_load_b32 v1, v[0:1] offset:-64
+// GFX1250: flat_load_b32 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_b32 v1, v[0:1] offset:64
+// GFX1250: flat_load_b32 v1, v[0:1] offset:64 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_b32 v1, v[4:5] offset:2047
+// GFX1250: flat_load_b32 v1, v[4:5] offset:2047 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0xff,0x07,0x00]
+
+flat_load_b32 v1, v[4:5] offset:-2048
+// GFX1250: flat_load_b32 v1, v[4:5] offset:-2048 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0xf8,0xff]
+
+flat_load_b32 v1, v[4:5] offset:2048
+// GFX1250: flat_load_b32 v1, v[4:5] offset:2048 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x08,0x00]
+
+flat_load_b32 v1, v[4:5] offset:-4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:-4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0xfc,0xff,0xff]
+
+flat_load_b32 v1, v[4:5] offset:4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x04,0x00,0x00]
+
+flat_load_b32 v1, v[4:5] offset:-4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:-4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0xfc,0xff,0xff]
+
+flat_load_b32 v1, v[4:5] offset:4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x04,0x00,0x00]
+
+flat_load_b32 v1, v[4:5] offset:-4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:-4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0xfc,0xff,0xff]
+
+flat_load_b32 v1, v[4:5] offset:4
+// GFX1250: flat_load_b32 v1, v[4:5] offset:4 ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x04,0x00,0x00]
+
+flat_load_b32 v1, v[4:5]
+// GFX1250: flat_load_b32 v1, v[4:5] ; encoding: [0x7c,0x00,0x05,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_b64 v[2:3], v[0:1] offset:-64
+// GFX1250: flat_load_b64 v[2:3], v[0:1] offset:-64 ; encoding: [0x7c,0x40,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_b64 v[2:3], v[0:1] offset:64
+// GFX1250: flat_load_b64 v[2:3], v[0:1] offset:64 ; encoding: [0x7c,0x40,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_b64 v[2:3], v[4:5]
+// GFX1250: flat_load_b64 v[2:3], v[4:5] ; encoding: [0x7c,0x40,0x05,0xec,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_b96 v[2:4], v[0:1] offset:-64
+// GFX1250: flat_load_b96 v[2:4], v[0:1] offset:-64 ; encoding: [0x7c,0x80,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_b96 v[2:4], v[0:1] offset:64
+// GFX1250: flat_load_b96 v[2:4], v[0:1] offset:64 ; encoding: [0x7c,0x80,0x05,0xec,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_b96 v[2:4], v[6:7]
+// GFX1250: flat_load_b96 v[2:4], v[6:7] ; encoding: [0x7c,0x80,0x05,0xec,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+flat_load_d16_b16 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_b16 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x00,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_b16 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_b16 v1, v[0:1] offset:64 ; encoding: [0x7c,0x00,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_b16 v1, v[4:5]
+// GFX1250: flat_load_d16_b16 v1, v[4:5] ; encoding: [0x7c,0x00,0x08,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_d16_hi_b16 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_hi_b16 v1, v[0:1] offset:-64 ; encoding: [0x7c,0xc0,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_hi_b16 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_hi_b16 v1, v[0:1] offset:64 ; encoding: [0x7c,0xc0,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_hi_b16 v1, v[4:5]
+// GFX1250: flat_load_d16_hi_b16 v1, v[4:5] ; encoding: [0x7c,0xc0,0x08,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_d16_hi_i8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_hi_i8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x80,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_hi_i8 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_hi_i8 v1, v[0:1] offset:64 ; encoding: [0x7c,0x80,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_hi_i8 v1, v[4:5]
+// GFX1250: flat_load_d16_hi_i8 v1, v[4:5] ; encoding: [0x7c,0x80,0x08,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_d16_hi_u8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_hi_u8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x40,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_hi_u8 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_hi_u8 v1, v[0:1] offset:64 ; encoding: [0x7c,0x40,0x08,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_hi_u8 v1, v[4:5]
+// GFX1250: flat_load_d16_hi_u8 v1, v[4:5] ; encoding: [0x7c,0x40,0x08,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_d16_i8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_i8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0xc0,0x07,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_i8 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_i8 v1, v[0:1] offset:64 ; encoding: [0x7c,0xc0,0x07,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_i8 v1, v[4:5]
+// GFX1250: flat_load_d16_i8 v1, v[4:5] ; encoding: [0x7c,0xc0,0x07,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_d16_u8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_d16_u8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x80,0x07,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_d16_u8 v1, v[0:1] offset:64
+// GFX1250: flat_load_d16_u8 v1, v[0:1] offset:64 ; encoding: [0x7c,0x80,0x07,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_d16_u8 v1, v[4:5]
+// GFX1250: flat_load_d16_u8 v1, v[4:5] ; encoding: [0x7c,0x80,0x07,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_i16 v1, v[0:1] offset:-64
+// GFX1250: flat_load_i16 v1, v[0:1] offset:-64 ; encoding: [0x7c,0xc0,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_i16 v1, v[0:1] offset:64
+// GFX1250: flat_load_i16 v1, v[0:1] offset:64 ; encoding: [0x7c,0xc0,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_i16 v1, v[4:5]
+// GFX1250: flat_load_i16 v1, v[4:5] ; encoding: [0x7c,0xc0,0x04,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_i8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_i8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x40,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_i8 v1, v[0:1] offset:64
+// GFX1250: flat_load_i8 v1, v[0:1] offset:64 ; encoding: [0x7c,0x40,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_i8 v1, v[4:5]
+// GFX1250: flat_load_i8 v1, v[4:5] ; encoding: [0x7c,0x40,0x04,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_u16 v1, v[0:1] offset:-64
+// GFX1250: flat_load_u16 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x80,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_u16 v1, v[0:1] offset:64
+// GFX1250: flat_load_u16 v1, v[0:1] offset:64 ; encoding: [0x7c,0x80,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_u16 v1, v[4:5]
+// GFX1250: flat_load_u16 v1, v[4:5] ; encoding: [0x7c,0x80,0x04,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_load_u8 v1, v[0:1] offset:-64
+// GFX1250: flat_load_u8 v1, v[0:1] offset:-64 ; encoding: [0x7c,0x00,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+flat_load_u8 v1, v[0:1] offset:64
+// GFX1250: flat_load_u8 v1, v[0:1] offset:64 ; encoding: [0x7c,0x00,0x04,0xec,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+flat_load_u8 v1, v[4:5]
+// GFX1250: flat_load_u8 v1, v[4:5] ; encoding: [0x7c,0x00,0x04,0xec,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+flat_store_b128 v[0:1], v[2:5] offset:-64
+// GFX1250: flat_store_b128 v[0:1], v[2:5] offset:-64 ; encoding: [0x7c,0x40,0x07,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b128 v[0:1], v[2:5] offset:64
+// GFX1250: flat_store_b128 v[0:1], v[2:5] offset:64 ; encoding: [0x7c,0x40,0x07,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b128 v[2:3], v[4:7]
+// GFX1250: flat_store_b128 v[2:3], v[4:7] ; encoding: [0x7c,0x40,0x07,0xec,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+flat_store_b16 v[0:1], v2 offset:-64
+// GFX1250: flat_store_b16 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b16 v[0:1], v2 offset:64
+// GFX1250: flat_store_b16 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b16 v[4:5], v1
+// GFX1250: flat_store_b16 v[4:5], v1 ; encoding: [0x7c,0x40,0x06,0xec,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+flat_store_b32 v[0:1], v2 offset:-64
+// GFX1250: flat_store_b32 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x80,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b32 v[0:1], v2 offset:64
+// GFX1250: flat_store_b32 v[0:1], v2 offset:64 ; encoding: [0x7c,0x80,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b32 v[4:5], v1 offset:-16
+// GFX1250: flat_store_b32 v[4:5], v1 offset:-16 ; encoding: [0x7c,0x80,0x06,0xec,0x00,0x00,0x80,0x00,0x04,0xf0,0xff,0xff]
+
+flat_store_b32 v[4:5], v1 offset:16
+// GFX1250: flat_store_b32 v[4:5], v1 offset:16 ; encoding: [0x7c,0x80,0x06,0xec,0x00,0x00,0x80,0x00,0x04,0x10,0x00,0x00]
+
+flat_store_b64 v[0:1], v[2:3] offset:-64
+// GFX1250: flat_store_b64 v[0:1], v[2:3] offset:-64 ; encoding: [0x7c,0xc0,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b64 v[0:1], v[2:3] offset:64
+// GFX1250: flat_store_b64 v[0:1], v[2:3] offset:64 ; encoding: [0x7c,0xc0,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b64 v[2:3], v[4:5]
+// GFX1250: flat_store_b64 v[2:3], v[4:5] ; encoding: [0x7c,0xc0,0x06,0xec,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+flat_store_b8 v[0:1], v2 offset:-64
+// GFX1250: flat_store_b8 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b8 v[0:1], v2 offset:64
+// GFX1250: flat_store_b8 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x06,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b8 v[4:5], v1
+// GFX1250: flat_store_b8 v[4:5], v1 ; encoding: [0x7c,0x00,0x06,0xec,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+flat_store_b96 v[0:1], v[2:4] offset:-64
+// GFX1250: flat_store_b96 v[0:1], v[2:4] offset:-64 ; encoding: [0x7c,0x00,0x07,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_b96 v[0:1], v[2:4] offset:64
+// GFX1250: flat_store_b96 v[0:1], v[2:4] offset:64 ; encoding: [0x7c,0x00,0x07,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_b96 v[2:3], v[4:6]
+// GFX1250: flat_store_b96 v[2:3], v[4:6] ; encoding: [0x7c,0x00,0x07,0xec,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+flat_store_d16_hi_b16 v[0:1], v2 offset:-64
+// GFX1250: flat_store_d16_hi_b16 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x40,0x09,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_d16_hi_b16 v[0:1], v2 offset:64
+// GFX1250: flat_store_d16_hi_b16 v[0:1], v2 offset:64 ; encoding: [0x7c,0x40,0x09,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_d16_hi_b16 v[4:5], v1
+// GFX1250: flat_store_d16_hi_b16 v[4:5], v1 ; encoding: [0x7c,0x40,0x09,0xec,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+flat_store_d16_hi_b8 v[0:1], v2 offset:-64
+// GFX1250: flat_store_d16_hi_b8 v[0:1], v2 offset:-64 ; encoding: [0x7c,0x00,0x09,0xec,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+flat_store_d16_hi_b8 v[0:1], v2 offset:64
+// GFX1250: flat_store_d16_hi_b8 v[0:1], v2 offset:64 ; encoding: [0x7c,0x00,0x09,0xec,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+flat_store_d16_hi_b8 v[4:5], v1
+// GFX1250: flat_store_d16_hi_b8 v[4:5], v1 ; encoding: [0x7c,0x00,0x09,0xec,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+global_atomic_add_f32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_add_f32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_f32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_add_f32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x15,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x15,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_f32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_add_f32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_f32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_add_f32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_add_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_add_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_add_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_add_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_add_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_add_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0xc0,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_add_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_add_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_add_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_add_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_add_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_and_b32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_and_b32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_and_b32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_and_b32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_and_b64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x40,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_and_b64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x40,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_and_b64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x40,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_and_b64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x40,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_and_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_and_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_and_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xee,0x00,0x00,0x10,0x01,0x01,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xee,0x00,0x00,0x10,0x01,0x01,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xee,0x00,0x00,0x10,0x01,0x01,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v1, v[2:3], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xee,0x00,0x00,0x10,0x01,0x01,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x10,0x02,0x02,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x10,0x02,0x02,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x10,0x02,0x02,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x10,0x02,0x02,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b32 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b32 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_cmpswap_b32 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x00,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b32 v1, v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v1, v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b32 v1, v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v1, v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b32 v1, v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v1, v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b32 v1, v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b32 v1, v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b32 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_cmpswap_b32 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b32 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_cmpswap_b32 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x00,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b64 v0, v[2:5], s[0:1] offset:-64
+// GFX1250: global_atomic_cmpswap_b64 v0, v[2:5], s[0:1] offset:-64 ; encoding: [0x00,0x80,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b64 v0, v[2:5], s[0:1] offset:64
+// GFX1250: global_atomic_cmpswap_b64 v0, v[2:5], s[0:1] offset:64 ; encoding: [0x00,0x80,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[0:1], v[2:5], off offset:-64
+// GFX1250: global_atomic_cmpswap_b64 v[0:1], v[2:5], off offset:-64 ; encoding: [0x7c,0x80,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b64 v[0:1], v[2:5], off offset:64
+// GFX1250: global_atomic_cmpswap_b64 v[0:1], v[2:5], off offset:64 ; encoding: [0x7c,0x80,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v0, v[2:5], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v0, v[2:5], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b64 v[2:3], v0, v[2:5], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v0, v[2:5], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v3, v[6:9], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cmpswap_b64 v[2:3], v[4:5], v[6:9], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cond_sub_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cond_sub_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cond_sub_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cond_sub_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cond_sub_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cond_sub_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cond_sub_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_cond_sub_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_cond_sub_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_cond_sub_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_cond_sub_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_cond_sub_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_clamp_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_clamp_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_clamp_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_clamp_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_clamp_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_sub_clamp_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_clamp_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_clamp_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_clamp_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_clamp_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_clamp_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_sub_clamp_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0xc0,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_f16 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_f16 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_f16 v1, v0, v2, s[0:1] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_f16 v1, v0, v2, s[0:1] th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x00,0x00,0x00]
+
+global_atomic_pk_add_f16 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_f16 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_f16 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_f16 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_f16 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_f16 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_f16 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_pk_add_f16 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_f16 v0, v2, s[0:1]
+// GFX1250: global_atomic_pk_add_f16 v0, v2, s[0:1] ; encoding: [0x00,0x40,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+
+global_atomic_pk_add_f16 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_pk_add_f16 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_f16 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_pk_add_f16 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_f16 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_pk_add_f16 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x00,0x00,0x00]
+
+global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_bf16 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_bf16 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_bf16 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_bf16 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_pk_add_bf16 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x16,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_bf16 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_pk_add_bf16 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_bf16 v0, v2, s[0:1]
+// GFX1250: global_atomic_pk_add_bf16 v0, v2, s[0:1] ; encoding: [0x00,0x80,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+
+global_atomic_pk_add_bf16 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_pk_add_bf16 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_pk_add_bf16 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_pk_add_bf16 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_pk_add_bf16 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_pk_add_bf16 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_dec_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_dec_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x10,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x10,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x10,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x10,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_dec_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_dec_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_dec_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x40,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_dec_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x40,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_dec_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x40,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_dec_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x40,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_dec_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_dec_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_dec_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_inc_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_inc_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_inc_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0xc0,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_inc_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0xc0,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_inc_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_inc_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x00,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_inc_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x00,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_inc_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x00,0x13,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_inc_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_inc_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_inc_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x13,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_num_f32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_max_num_f32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_num_f32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_max_num_f32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_num_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_num_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_num_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_num_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_num_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_num_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_num_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_num_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_num_f32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_max_num_f32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_num_f32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_max_num_f32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_max_i32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_max_i32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_max_i32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_max_i32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_max_i64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_max_i64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0xc0,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_max_i64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_max_i64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_i64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_i64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_i64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_max_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_max_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_max_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0xc0,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_max_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0xc0,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_max_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_max_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x00,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_max_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x00,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_max_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x00,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_max_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_max_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_max_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_num_f32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_min_num_f32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_num_f32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_min_num_f32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_num_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_num_f32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_num_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_num_f32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_num_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_num_f32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_num_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_num_f32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x14,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_num_f32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_min_num_f32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_num_f32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_min_num_f32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x14,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_min_i32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_min_i32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_min_i32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_min_i32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_min_i64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x40,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_min_i64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x40,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_min_i64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x40,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_min_i64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x40,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_i64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_i64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_i64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_min_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_min_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0e,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_min_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_min_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x0e,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_min_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x80,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_min_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x80,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_min_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x80,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_min_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x80,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_min_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_min_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_min_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_or_b32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_or_b32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_or_b32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_or_b32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_or_b64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x80,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_or_b64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x80,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_or_b64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x80,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_or_b64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x80,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_or_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_or_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_or_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_sub_u32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_sub_u32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0d,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_sub_u32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_sub_u32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x0d,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_sub_u64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_sub_u64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x00,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_sub_u64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x00,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_sub_u64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x00,0x11,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x00,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_sub_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_sub_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_sub_u64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x00,0x11,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b32 v0, v1, v3, s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v1, v3, s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x01,0x00,0x00,0x00]
+
+global_atomic_swap_b32 v0, v1, v3, s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v1, v3, s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x01,0x00,0x00,0x00]
+
+global_atomic_swap_b32 v0, v1, v3, s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v1, v3, s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x01,0xff,0x07,0x00]
+
+global_atomic_swap_b32 v0, v1, v3, s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v1, v3, s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x01,0xff,0x07,0x00]
+
+global_atomic_swap_b32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_swap_b32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x0c,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_swap_b32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x0c,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b32 v0, v[2:3], v3, off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v[2:3], v3, off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x02,0x00,0x00,0x00]
+
+global_atomic_swap_b32 v0, v[2:3], v3, off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v[2:3], v3, off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x02,0x00,0x00,0x00]
+
+global_atomic_swap_b32 v0, v[2:3], v3, off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v[2:3], v3, off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x02,0xff,0x07,0x00]
+
+global_atomic_swap_b32 v0, v[2:3], v3, off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v0, v[2:3], v3, off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x90,0x01,0x02,0xff,0x07,0x00]
+
+global_atomic_swap_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0c,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x0c,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x0c,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_swap_b32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_swap_b32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0xc0,0x0c,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_swap_b64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0x40,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_swap_b64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0x40,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_swap_b64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0x40,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_swap_b64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0x40,0x10,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x40,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0x00,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0x00,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0xff,0x07,0x00]
+
+global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v3, v[6:7], s[2:3] offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x03,0xff,0x07,0x00]
+
+global_atomic_swap_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_swap_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0x00,0x00,0x00]
+
+global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off offset:2047 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_swap_b64 v[2:3], v[4:5], v[6:7], off offset:2047 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x40,0x10,0xee,0x02,0x00,0x10,0x03,0x04,0xff,0x07,0x00]
+
+global_atomic_xor_b32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_atomic_xor_b32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b32 v0, v2, s[0:1] offset:64
+// GFX1250: global_atomic_xor_b32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b32 v1, v0, v2, s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b32 v1, v0, v2, s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0x80,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b32 v1, v[0:1], v2, off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b32 v1, v[0:1], v2, off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x0f,0xee,0x01,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b32 v[0:1], v2, off offset:-64
+// GFX1250: global_atomic_xor_b32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b32 v[0:1], v2, off offset:64
+// GFX1250: global_atomic_xor_b32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x0f,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_atomic_xor_b64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_atomic_xor_b64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0xc0,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_atomic_xor_b64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_atomic_xor_b64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x12,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b64 v[2:3], v0, v[2:3], s[0:1] offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b64 v[2:3], v0, v[2:3], s[0:1] offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x00,0xc0,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_atomic_xor_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b64 v[2:3], v[0:1], v[2:3], off offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0xc0,0xff,0xff]
+
+global_atomic_xor_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN
+// GFX1250: global_atomic_xor_b64 v[2:3], v[0:1], v[2:3], off offset:64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0xc0,0x12,0xee,0x02,0x00,0x10,0x01,0x00,0x40,0x00,0x00]
+
+global_load_addtid_b32 v1, off offset:-64
+// GFX1250: global_load_addtid_b32 v1, off offset:-64 ; encoding: [0x7c,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_addtid_b32 v1, off offset:64
+// GFX1250: global_load_addtid_b32 v1, off offset:64 ; encoding: [0x7c,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_addtid_b32 v1, off
+// GFX1250: global_load_addtid_b32 v1, off ; encoding: [0x7c,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_addtid_b32 v1, s[0:1] offset:-64
+// GFX1250: global_load_addtid_b32 v1, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_addtid_b32 v1, s[0:1] offset:64
+// GFX1250: global_load_addtid_b32 v1, s[0:1] offset:64 ; encoding: [0x00,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_addtid_b32 v1, s[2:3]
+// GFX1250: global_load_addtid_b32 v1, s[2:3] ; encoding: [0x02,0x00,0x0a,0xee,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_load_b128 v[2:5], v0, s[0:1] offset:-64
+// GFX1250: global_load_b128 v[2:5], v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b128 v[2:5], v0, s[0:1] offset:64
+// GFX1250: global_load_b128 v[2:5], v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b128 v[2:5], v5, s[2:3]
+// GFX1250: global_load_b128 v[2:5], v5, s[2:3] ; encoding: [0x02,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x05,0x00,0x00,0x00]
+
+global_load_b128 v[2:5], v[0:1], off offset:-64
+// GFX1250: global_load_b128 v[2:5], v[0:1], off offset:-64 ; encoding: [0x7c,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b128 v[2:5], v[0:1], off offset:64
+// GFX1250: global_load_b128 v[2:5], v[0:1], off offset:64 ; encoding: [0x7c,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b128 v[2:5], v[6:7], off
+// GFX1250: global_load_b128 v[2:5], v[6:7], off ; encoding: [0x7c,0xc0,0x05,0xee,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+global_load_b32 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_b32 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b32 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_b32 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b32 v1, v3, s[2:3] offset:2047
+// GFX1250: global_load_b32 v1, v3, s[2:3] offset:2047 ; encoding: [0x02,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x03,0xff,0x07,0x00]
+
+global_load_b32 v1, v3, s[2:3]
+// GFX1250: global_load_b32 v1, v3, s[2:3] ; encoding: [0x02,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_b32 v1, v[0:1], off offset:-64
+// GFX1250: global_load_b32 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b32 v1, v[0:1], off offset:64
+// GFX1250: global_load_b32 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b32 v1, v[4:5], off offset:2047
+// GFX1250: global_load_b32 v1, v[4:5], off offset:2047 ; encoding: [0x7c,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x04,0xff,0x07,0x00]
+
+global_load_b32 v1, v[4:5], off
+// GFX1250: global_load_b32 v1, v[4:5], off ; encoding: [0x7c,0x00,0x05,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_b64 v[2:3], v0, s[0:1] offset:-64
+// GFX1250: global_load_b64 v[2:3], v0, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b64 v[2:3], v0, s[0:1] offset:64
+// GFX1250: global_load_b64 v[2:3], v0, s[0:1] offset:64 ; encoding: [0x00,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b64 v[2:3], v3, s[2:3]
+// GFX1250: global_load_b64 v[2:3], v3, s[2:3] ; encoding: [0x02,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_b64 v[2:3], v[0:1], off offset:-64
+// GFX1250: global_load_b64 v[2:3], v[0:1], off offset:-64 ; encoding: [0x7c,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b64 v[2:3], v[0:1], off offset:64
+// GFX1250: global_load_b64 v[2:3], v[0:1], off offset:64 ; encoding: [0x7c,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b64 v[2:3], v[4:5], off
+// GFX1250: global_load_b64 v[2:3], v[4:5], off ; encoding: [0x7c,0x40,0x05,0xee,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_b96 v[2:4], v0, s[0:1] offset:-64
+// GFX1250: global_load_b96 v[2:4], v0, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b96 v[2:4], v0, s[0:1] offset:64
+// GFX1250: global_load_b96 v[2:4], v0, s[0:1] offset:64 ; encoding: [0x00,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b96 v[2:4], v5, s[2:3]
+// GFX1250: global_load_b96 v[2:4], v5, s[2:3] ; encoding: [0x02,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x05,0x00,0x00,0x00]
+
+global_load_b96 v[2:4], v[0:1], off offset:-64
+// GFX1250: global_load_b96 v[2:4], v[0:1], off offset:-64 ; encoding: [0x7c,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_b96 v[2:4], v[0:1], off offset:64
+// GFX1250: global_load_b96 v[2:4], v[0:1], off offset:64 ; encoding: [0x7c,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_b96 v[2:4], v[6:7], off
+// GFX1250: global_load_b96 v[2:4], v[6:7], off ; encoding: [0x7c,0x80,0x05,0xee,0x02,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+global_load_d16_b16 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_b16 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_b16 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_b16 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_b16 v1, v3, s[2:3]
+// GFX1250: global_load_d16_b16 v1, v3, s[2:3] ; encoding: [0x02,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_b16 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_b16 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_b16 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_b16 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_b16 v1, v[4:5], off
+// GFX1250: global_load_d16_b16 v1, v[4:5], off ; encoding: [0x7c,0x00,0x08,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_d16_hi_b16 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_hi_b16 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_b16 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_hi_b16 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_b16 v1, v3, s[2:3]
+// GFX1250: global_load_d16_hi_b16 v1, v3, s[2:3] ; encoding: [0x02,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_hi_b16 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_hi_b16 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_b16 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_hi_b16 v1, v[0:1], off offset:64 ; encoding: [0x7c,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_b16 v1, v[4:5], off
+// GFX1250: global_load_d16_hi_b16 v1, v[4:5], off ; encoding: [0x7c,0xc0,0x08,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_d16_hi_i8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_hi_i8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_i8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_hi_i8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_i8 v1, v3, s[2:3]
+// GFX1250: global_load_d16_hi_i8 v1, v3, s[2:3] ; encoding: [0x02,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_hi_i8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_hi_i8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_i8 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_hi_i8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_i8 v1, v[4:5], off
+// GFX1250: global_load_d16_hi_i8 v1, v[4:5], off ; encoding: [0x7c,0x80,0x08,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_d16_hi_u8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_hi_u8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_u8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_hi_u8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_u8 v1, v3, s[2:3]
+// GFX1250: global_load_d16_hi_u8 v1, v3, s[2:3] ; encoding: [0x02,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_hi_u8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_hi_u8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_hi_u8 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_hi_u8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_hi_u8 v1, v[4:5], off
+// GFX1250: global_load_d16_hi_u8 v1, v[4:5], off ; encoding: [0x7c,0x40,0x08,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_d16_i8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_i8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_i8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_i8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_i8 v1, v3, s[2:3]
+// GFX1250: global_load_d16_i8 v1, v3, s[2:3] ; encoding: [0x02,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_i8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_i8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_i8 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_i8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_i8 v1, v[4:5], off
+// GFX1250: global_load_d16_i8 v1, v[4:5], off ; encoding: [0x7c,0xc0,0x07,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_d16_u8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_d16_u8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_u8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_d16_u8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_u8 v1, v3, s[2:3]
+// GFX1250: global_load_d16_u8 v1, v3, s[2:3] ; encoding: [0x02,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_d16_u8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_d16_u8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_d16_u8 v1, v[0:1], off offset:64
+// GFX1250: global_load_d16_u8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_d16_u8 v1, v[4:5], off
+// GFX1250: global_load_d16_u8 v1, v[4:5], off ; encoding: [0x7c,0x80,0x07,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_i16 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_i16 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_i16 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_i16 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_i16 v1, v3, s[2:3]
+// GFX1250: global_load_i16 v1, v3, s[2:3] ; encoding: [0x02,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_i16 v1, v[0:1], off offset:-64
+// GFX1250: global_load_i16 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_i16 v1, v[0:1], off offset:64
+// GFX1250: global_load_i16 v1, v[0:1], off offset:64 ; encoding: [0x7c,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_i16 v1, v[4:5], off
+// GFX1250: global_load_i16 v1, v[4:5], off ; encoding: [0x7c,0xc0,0x04,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_i8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_i8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_i8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_i8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_i8 v1, v3, s[2:3]
+// GFX1250: global_load_i8 v1, v3, s[2:3] ; encoding: [0x02,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_i8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_i8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_i8 v1, v[0:1], off offset:64
+// GFX1250: global_load_i8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_i8 v1, v[4:5], off
+// GFX1250: global_load_i8 v1, v[4:5], off ; encoding: [0x7c,0x40,0x04,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_u16 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_u16 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_u16 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_u16 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_u16 v1, v3, s[2:3]
+// GFX1250: global_load_u16 v1, v3, s[2:3] ; encoding: [0x02,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_u16 v1, v[0:1], off offset:-64
+// GFX1250: global_load_u16 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_u16 v1, v[0:1], off offset:64
+// GFX1250: global_load_u16 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_u16 v1, v[4:5], off
+// GFX1250: global_load_u16 v1, v[4:5], off ; encoding: [0x7c,0x80,0x04,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_u8 v1, v0, s[0:1] offset:-64
+// GFX1250: global_load_u8 v1, v0, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_u8 v1, v0, s[0:1] offset:64
+// GFX1250: global_load_u8 v1, v0, s[0:1] offset:64 ; encoding: [0x00,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_u8 v1, v3, s[2:3]
+// GFX1250: global_load_u8 v1, v3, s[2:3] ; encoding: [0x02,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00]
+
+global_load_u8 v1, v[0:1], off offset:-64
+// GFX1250: global_load_u8 v1, v[0:1], off offset:-64 ; encoding: [0x7c,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_u8 v1, v[0:1], off offset:64
+// GFX1250: global_load_u8 v1, v[0:1], off offset:64 ; encoding: [0x7c,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_u8 v1, v[4:5], off
+// GFX1250: global_load_u8 v1, v[4:5], off ; encoding: [0x7c,0x00,0x04,0xee,0x01,0x00,0x00,0x00,0x04,0x00,0x00,0x00]
+
+global_load_block v[8:39], v0, s[0:1] offset:-64
+// GFX1250: global_load_block v[8:39], v0, s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_block v[8:39], v0, s[0:1] offset:64
+// GFX1250: global_load_block v[8:39], v0, s[0:1] offset:64 ; encoding: [0x00,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_block v[8:39], v5, s[2:3]
+// GFX1250: global_load_block v[8:39], v5, s[2:3] ; encoding: [0x02,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x05,0x00,0x00,0x00]
+
+global_load_block v[8:39], v[0:1], off offset:-64
+// GFX1250: global_load_block v[8:39], v[0:1], off offset:-64 ; encoding: [0x7c,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+global_load_block v[8:39], v[0:1], off offset:64
+// GFX1250: global_load_block v[8:39], v[0:1], off offset:64 ; encoding: [0x7c,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+global_load_block v[8:39], v[6:7], off
+// GFX1250: global_load_block v[8:39], v[6:7], off ; encoding: [0x7c,0xc0,0x14,0xee,0x08,0x00,0x00,0x00,0x06,0x00,0x00,0x00]
+
+global_load_block v[8:39], v[6:7], off th:TH_LOAD_HT scope:SCOPE_SE
+// GFX1250: global_load_block v[8:39], v[6:7], off th:TH_LOAD_HT scope:SCOPE_SE ; encoding: [0x7c,0xc0,0x14,0xee,0x08,0x00,0x24,0x00,0x06,0x00,0x00,0x00]
+
+global_store_addtid_b32 v2, off offset:-64
+// GFX1250: global_store_addtid_b32 v2, off offset:-64 ; encoding: [0x7c,0x40,0x0a,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_addtid_b32 v2, off offset:64
+// GFX1250: global_store_addtid_b32 v2, off offset:64 ; encoding: [0x7c,0x40,0x0a,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_addtid_b32 v2, s[0:1] offset:-64
+// GFX1250: global_store_addtid_b32 v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x0a,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_addtid_b32 v2, s[0:1] offset:64
+// GFX1250: global_store_addtid_b32 v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x0a,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b128 v0, v[2:5], s[0:1] offset:-64
+// GFX1250: global_store_b128 v0, v[2:5], s[0:1] offset:-64 ; encoding: [0x00,0x40,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b128 v0, v[2:5], s[0:1] offset:64
+// GFX1250: global_store_b128 v0, v[2:5], s[0:1] offset:64 ; encoding: [0x00,0x40,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b128 v1, v[4:7], s[2:3]
+// GFX1250: global_store_b128 v1, v[4:7], s[2:3] ; encoding: [0x02,0x40,0x07,0xee,0x00,0x00,0x00,0x02,0x01,0x00,0x00,0x00]
+
+global_store_b128 v[0:1], v[2:5], off offset:-64
+// GFX1250: global_store_b128 v[0:1], v[2:5], off offset:-64 ; encoding: [0x7c,0x40,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b128 v[0:1], v[2:5], off offset:64
+// GFX1250: global_store_b128 v[0:1], v[2:5], off offset:64 ; encoding: [0x7c,0x40,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b128 v[2:3], v[4:7], off
+// GFX1250: global_store_b128 v[2:3], v[4:7], off ; encoding: [0x7c,0x40,0x07,0xee,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+global_store_b16 v0, v2, s[0:1] offset:-64
+// GFX1250: global_store_b16 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b16 v0, v2, s[0:1] offset:64
+// GFX1250: global_store_b16 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b16 v3, v1, s[2:3]
+// GFX1250: global_store_b16 v3, v1, s[2:3] ; encoding: [0x02,0x40,0x06,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
+
+global_store_b16 v[0:1], v2, off offset:-64
+// GFX1250: global_store_b16 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b16 v[0:1], v2, off offset:64
+// GFX1250: global_store_b16 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b16 v[4:5], v1, off
+// GFX1250: global_store_b16 v[4:5], v1, off ; encoding: [0x7c,0x40,0x06,0xee,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+global_store_b32 v0, v2, s[0:1] offset:-64
+// GFX1250: global_store_b32 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b32 v0, v2, s[0:1] offset:64
+// GFX1250: global_store_b32 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b32 v3, v1, s[2:3] offset:-16
+// GFX1250: global_store_b32 v3, v1, s[2:3] offset:-16 ; encoding: [0x02,0x80,0x06,0xee,0x00,0x00,0x80,0x00,0x03,0xf0,0xff,0xff]
+
+global_store_b32 v3, v1, s[2:3] offset:16
+// GFX1250: global_store_b32 v3, v1, s[2:3] offset:16 ; encoding: [0x02,0x80,0x06,0xee,0x00,0x00,0x80,0x00,0x03,0x10,0x00,0x00]
+
+global_store_b32 v[0:1], v2, off offset:-64
+// GFX1250: global_store_b32 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b32 v[0:1], v2, off offset:64
+// GFX1250: global_store_b32 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b32 v[4:5], v1, off offset:-16
+// GFX1250: global_store_b32 v[4:5], v1, off offset:-16 ; encoding: [0x7c,0x80,0x06,0xee,0x00,0x00,0x80,0x00,0x04,0xf0,0xff,0xff]
+
+global_store_b32 v[4:5], v1, off offset:16
+// GFX1250: global_store_b32 v[4:5], v1, off offset:16 ; encoding: [0x7c,0x80,0x06,0xee,0x00,0x00,0x80,0x00,0x04,0x10,0x00,0x00]
+
+global_store_b64 v0, v[2:3], s[0:1] offset:-64
+// GFX1250: global_store_b64 v0, v[2:3], s[0:1] offset:-64 ; encoding: [0x00,0xc0,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b64 v0, v[2:3], s[0:1] offset:64
+// GFX1250: global_store_b64 v0, v[2:3], s[0:1] offset:64 ; encoding: [0x00,0xc0,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b64 v1, v[2:3], s[2:3]
+// GFX1250: global_store_b64 v1, v[2:3], s[2:3] ; encoding: [0x02,0xc0,0x06,0xee,0x00,0x00,0x00,0x01,0x01,0x00,0x00,0x00]
+
+global_store_b64 v[0:1], v[2:3], off offset:-64
+// GFX1250: global_store_b64 v[0:1], v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b64 v[0:1], v[2:3], off offset:64
+// GFX1250: global_store_b64 v[0:1], v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b64 v[2:3], v[4:5], off
+// GFX1250: global_store_b64 v[2:3], v[4:5], off ; encoding: [0x7c,0xc0,0x06,0xee,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+global_store_b8 v0, v2, s[0:1] offset:-64
+// GFX1250: global_store_b8 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b8 v0, v2, s[0:1] offset:64
+// GFX1250: global_store_b8 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b8 v3, v1, s[2:3]
+// GFX1250: global_store_b8 v3, v1, s[2:3] ; encoding: [0x02,0x00,0x06,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
+
+global_store_b8 v[0:1], v2, off offset:-64
+// GFX1250: global_store_b8 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b8 v[0:1], v2, off offset:64
+// GFX1250: global_store_b8 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x06,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b8 v[4:5], v1, off
+// GFX1250: global_store_b8 v[4:5], v1, off ; encoding: [0x7c,0x00,0x06,0xee,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+global_store_b96 v0, v[2:4], s[0:1] offset:-64
+// GFX1250: global_store_b96 v0, v[2:4], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b96 v0, v[2:4], s[0:1] offset:64
+// GFX1250: global_store_b96 v0, v[2:4], s[0:1] offset:64 ; encoding: [0x00,0x00,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b96 v1, v[4:6], s[2:3]
+// GFX1250: global_store_b96 v1, v[4:6], s[2:3] ; encoding: [0x02,0x00,0x07,0xee,0x00,0x00,0x00,0x02,0x01,0x00,0x00,0x00]
+
+global_store_b96 v[0:1], v[2:4], off offset:-64
+// GFX1250: global_store_b96 v[0:1], v[2:4], off offset:-64 ; encoding: [0x7c,0x00,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_b96 v[0:1], v[2:4], off offset:64
+// GFX1250: global_store_b96 v[0:1], v[2:4], off offset:64 ; encoding: [0x7c,0x00,0x07,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_b96 v[2:3], v[4:6], off
+// GFX1250: global_store_b96 v[2:3], v[4:6], off ; encoding: [0x7c,0x00,0x07,0xee,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+global_store_d16_hi_b16 v0, v2, s[0:1] offset:-64
+// GFX1250: global_store_d16_hi_b16 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x40,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_d16_hi_b16 v0, v2, s[0:1] offset:64
+// GFX1250: global_store_d16_hi_b16 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x40,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_d16_hi_b16 v3, v1, s[2:3]
+// GFX1250: global_store_d16_hi_b16 v3, v1, s[2:3] ; encoding: [0x02,0x40,0x09,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
+
+global_store_d16_hi_b16 v[0:1], v2, off offset:-64
+// GFX1250: global_store_d16_hi_b16 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x40,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_d16_hi_b16 v[0:1], v2, off offset:64
+// GFX1250: global_store_d16_hi_b16 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x40,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_d16_hi_b16 v[4:5], v1, off
+// GFX1250: global_store_d16_hi_b16 v[4:5], v1, off ; encoding: [0x7c,0x40,0x09,0xee,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+global_store_d16_hi_b8 v0, v2, s[0:1] offset:-64
+// GFX1250: global_store_d16_hi_b8 v0, v2, s[0:1] offset:-64 ; encoding: [0x00,0x00,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_d16_hi_b8 v0, v2, s[0:1] offset:64
+// GFX1250: global_store_d16_hi_b8 v0, v2, s[0:1] offset:64 ; encoding: [0x00,0x00,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_d16_hi_b8 v3, v1, s[2:3]
+// GFX1250: global_store_d16_hi_b8 v3, v1, s[2:3] ; encoding: [0x02,0x00,0x09,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
+
+global_store_d16_hi_b8 v[0:1], v2, off offset:-64
+// GFX1250: global_store_d16_hi_b8 v[0:1], v2, off offset:-64 ; encoding: [0x7c,0x00,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_d16_hi_b8 v[0:1], v2, off offset:64
+// GFX1250: global_store_d16_hi_b8 v[0:1], v2, off offset:64 ; encoding: [0x7c,0x00,0x09,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_d16_hi_b8 v[4:5], v1, off
+// GFX1250: global_store_d16_hi_b8 v[4:5], v1, off ; encoding: [0x7c,0x00,0x09,0xee,0x00,0x00,0x80,0x00,0x04,0x00,0x00,0x00]
+
+global_store_block v0, v[2:33], s[0:1] offset:-64
+// GFX1250: global_store_block v0, v[2:33], s[0:1] offset:-64 ; encoding: [0x00,0x00,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_block v0, v[2:33], s[0:1] offset:64
+// GFX1250: global_store_block v0, v[2:33], s[0:1] offset:64 ; encoding: [0x00,0x00,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_block v1, v[4:35], s[2:3]
+// GFX1250: global_store_block v1, v[4:35], s[2:3] ; encoding: [0x02,0x00,0x15,0xee,0x00,0x00,0x00,0x02,0x01,0x00,0x00,0x00]
+
+global_store_block v[0:1], v[2:33], off offset:-64
+// GFX1250: global_store_block v[0:1], v[2:33], off offset:-64 ; encoding: [0x7c,0x00,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+global_store_block v[0:1], v[2:33], off offset:64
+// GFX1250: global_store_block v[0:1], v[2:33], off offset:64 ; encoding: [0x7c,0x00,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+global_store_block v[2:3], v[4:35], off
+// GFX1250: global_store_block v[2:3], v[4:35], off ; encoding: [0x7c,0x00,0x15,0xee,0x00,0x00,0x00,0x02,0x02,0x00,0x00,0x00]
+
+global_store_block v[2:3], v[4:35], off th:TH_STORE_HT scope:SCOPE_SE
+// GFX1250: global_store_block v[2:3], v[4:35], off th:TH_STORE_HT scope:SCOPE_SE ; encoding: [0x7c,0x00,0x15,0xee,0x00,0x00,0x24,0x02,0x02,0x00,0x00,0x00]
+
+global_inv
+// GFX1250: global_inv ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_inv scope:SCOPE_DEV
+// GFX1250: global_inv scope:SCOPE_DEV ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_inv scope:SCOPE_SYS
+// GFX1250: global_inv scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
+global_wb
+// GFX1250: global_wb ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_wb scope:SCOPE_DEV
+// GFX1250: global_wb scope:SCOPE_DEV ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_wb scope:SCOPE_SYS
+// GFX1250: global_wb scope:SCOPE_SYS ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv
+// GFX1250: global_wbinv ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv scope:SCOPE_DEV
+// GFX1250: global_wbinv scope:SCOPE_DEV ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv scope:SCOPE_SYS
+// GFX1250: global_wbinv scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
+scratch_load_b128 v[2:5], off, off offset:-64
+// GFX1250: scratch_load_b128 v[2:5], off, off offset:-64 ; encoding: [0x7c,0xc0,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b128 v[2:5], off, off offset:64
+// GFX1250: scratch_load_b128 v[2:5], off, off offset:64 ; encoding: [0x7c,0xc0,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b128 v[2:5], off, s0 offset:-64
+// GFX1250: scratch_load_b128 v[2:5], off, s0 offset:-64 ; encoding: [0x00,0xc0,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b128 v[2:5], off, s0 offset:64
+// GFX1250: scratch_load_b128 v[2:5], off, s0 offset:64 ; encoding: [0x00,0xc0,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b128 v[2:5], v0, off offset:-64
+// GFX1250: scratch_load_b128 v[2:5], v0, off offset:-64 ; encoding: [0x7c,0xc0,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b128 v[2:5], v0, off offset:64
+// GFX1250: scratch_load_b128 v[2:5], v0, off offset:64 ; encoding: [0x7c,0xc0,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b128 v[2:5], v0, s0 offset:-64
+// GFX1250: scratch_load_b128 v[2:5], v0, s0 offset:-64 ; encoding: [0x00,0xc0,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b128 v[2:5], v0, s0 offset:64
+// GFX1250: scratch_load_b128 v[2:5], v0, s0 offset:64 ; encoding: [0x00,0xc0,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b128 v[2:5], v2, s1
+// GFX1250: scratch_load_b128 v[2:5], v2, s1 ; encoding: [0x01,0xc0,0x05,0xed,0x02,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_b32 v1, off, off offset:2047
+// GFX1250: scratch_load_b32 v1, off, off offset:2047 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xff,0x07,0x00]
+
+scratch_load_b32 v1, off, off offset:-64
+// GFX1250: scratch_load_b32 v1, off, off offset:-64 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b32 v1, off, off offset:64
+// GFX1250: scratch_load_b32 v1, off, off offset:64 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b32 v1, off, off
+// GFX1250: scratch_load_b32 v1, off, off ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+scratch_load_b32 v1, off, s0 offset:-64
+// GFX1250: scratch_load_b32 v1, off, s0 offset:-64 ; encoding: [0x00,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b32 v1, off, s0 offset:64
+// GFX1250: scratch_load_b32 v1, off, s0 offset:64 ; encoding: [0x00,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b32 v1, off, s1 offset:2047
+// GFX1250: scratch_load_b32 v1, off, s1 offset:2047 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xff,0x07,0x00]
+
+scratch_load_b32 v1, v0, off offset:-64
+// GFX1250: scratch_load_b32 v1, v0, off offset:-64 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b32 v1, v0, off offset:64
+// GFX1250: scratch_load_b32 v1, v0, off offset:64 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b32 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_b32 v1, v0, s0 offset:-64 ; encoding: [0x00,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b32 v1, v0, s0 offset:64
+// GFX1250: scratch_load_b32 v1, v0, s0 offset:64 ; encoding: [0x00,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b32 v1, v2, off offset:2047
+// GFX1250: scratch_load_b32 v1, v2, off offset:2047 ; encoding: [0x7c,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0xff,0x07,0x00]
+
+scratch_load_b32 v1, v2, s1 offset:-61440
+// GFX1250: scratch_load_b32 v1, v2, s1 offset:-61440 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x10,0xff]
+
+scratch_load_b32 v1, v2, s1 offset:61440
+// GFX1250: scratch_load_b32 v1, v2, s1 offset:61440 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0xf0,0x00]
+
+scratch_load_b32 v1, v2, s1 offset:2047
+// GFX1250: scratch_load_b32 v1, v2, s1 offset:2047 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0xff,0x07,0x00]
+
+scratch_load_b32 v1, v2, s1 offset:-4095
+// GFX1250: scratch_load_b32 v1, v2, s1 offset:-4095 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0x01,0xf0,0xff]
+
+scratch_load_b32 v1, v2, s1 offset:4095
+// GFX1250: scratch_load_b32 v1, v2, s1 offset:4095 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0xff,0x0f,0x00]
+
+scratch_load_b32 v1, v2, s1
+// GFX1250: scratch_load_b32 v1, v2, s1 ; encoding: [0x01,0x00,0x05,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_b64 v[2:3], off, off offset:-64
+// GFX1250: scratch_load_b64 v[2:3], off, off offset:-64 ; encoding: [0x7c,0x40,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b64 v[2:3], off, off offset:64
+// GFX1250: scratch_load_b64 v[2:3], off, off offset:64 ; encoding: [0x7c,0x40,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b64 v[2:3], off, s0 offset:-64
+// GFX1250: scratch_load_b64 v[2:3], off, s0 offset:-64 ; encoding: [0x00,0x40,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b64 v[2:3], off, s0 offset:64
+// GFX1250: scratch_load_b64 v[2:3], off, s0 offset:64 ; encoding: [0x00,0x40,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b64 v[2:3], v0, off offset:-64
+// GFX1250: scratch_load_b64 v[2:3], v0, off offset:-64 ; encoding: [0x7c,0x40,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b64 v[2:3], v0, off offset:64
+// GFX1250: scratch_load_b64 v[2:3], v0, off offset:64 ; encoding: [0x7c,0x40,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b64 v[2:3], v0, s0 offset:-64
+// GFX1250: scratch_load_b64 v[2:3], v0, s0 offset:-64 ; encoding: [0x00,0x40,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b64 v[2:3], v0, s0 offset:64
+// GFX1250: scratch_load_b64 v[2:3], v0, s0 offset:64 ; encoding: [0x00,0x40,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b64 v[2:3], v2, s1
+// GFX1250: scratch_load_b64 v[2:3], v2, s1 ; encoding: [0x01,0x40,0x05,0xed,0x02,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_b96 v[2:4], off, off offset:-64
+// GFX1250: scratch_load_b96 v[2:4], off, off offset:-64 ; encoding: [0x7c,0x80,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b96 v[2:4], off, off offset:64
+// GFX1250: scratch_load_b96 v[2:4], off, off offset:64 ; encoding: [0x7c,0x80,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b96 v[2:4], off, s0 offset:-64
+// GFX1250: scratch_load_b96 v[2:4], off, s0 offset:-64 ; encoding: [0x00,0x80,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b96 v[2:4], off, s0 offset:64
+// GFX1250: scratch_load_b96 v[2:4], off, s0 offset:64 ; encoding: [0x00,0x80,0x05,0xed,0x02,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b96 v[2:4], v0, off offset:-64
+// GFX1250: scratch_load_b96 v[2:4], v0, off offset:-64 ; encoding: [0x7c,0x80,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b96 v[2:4], v0, off offset:64
+// GFX1250: scratch_load_b96 v[2:4], v0, off offset:64 ; encoding: [0x7c,0x80,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b96 v[2:4], v0, s0 offset:-64
+// GFX1250: scratch_load_b96 v[2:4], v0, s0 offset:-64 ; encoding: [0x00,0x80,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_b96 v[2:4], v0, s0 offset:64
+// GFX1250: scratch_load_b96 v[2:4], v0, s0 offset:64 ; encoding: [0x00,0x80,0x05,0xed,0x02,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_b96 v[2:4], v2, s1
+// GFX1250: scratch_load_b96 v[2:4], v2, s1 ; encoding: [0x01,0x80,0x05,0xed,0x02,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_b16 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_b16 v1, off, off offset:-64 ; encoding: [0x7c,0x00,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_b16 v1, off, off offset:64
+// GFX1250: scratch_load_d16_b16 v1, off, off offset:64 ; encoding: [0x7c,0x00,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_b16 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_b16 v1, off, s0 offset:-64 ; encoding: [0x00,0x00,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_b16 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_b16 v1, off, s0 offset:64 ; encoding: [0x00,0x00,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_b16 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_b16 v1, v0, off offset:-64 ; encoding: [0x7c,0x00,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_b16 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_b16 v1, v0, off offset:64 ; encoding: [0x7c,0x00,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_b16 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_b16 v1, v0, s0 offset:-64 ; encoding: [0x00,0x00,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_b16 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_b16 v1, v0, s0 offset:64 ; encoding: [0x00,0x00,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_b16 v1, v2, s1
+// GFX1250: scratch_load_d16_b16 v1, v2, s1 ; encoding: [0x01,0x00,0x08,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_hi_b16 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_hi_b16 v1, off, off offset:-64 ; encoding: [0x7c,0xc0,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_b16 v1, off, off offset:64
+// GFX1250: scratch_load_d16_hi_b16 v1, off, off offset:64 ; encoding: [0x7c,0xc0,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_b16 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_b16 v1, off, s0 offset:-64 ; encoding: [0x00,0xc0,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_b16 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_hi_b16 v1, off, s0 offset:64 ; encoding: [0x00,0xc0,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_b16 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_hi_b16 v1, v0, off offset:-64 ; encoding: [0x7c,0xc0,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_b16 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_hi_b16 v1, v0, off offset:64 ; encoding: [0x7c,0xc0,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_b16 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_b16 v1, v0, s0 offset:-64 ; encoding: [0x00,0xc0,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_b16 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_hi_b16 v1, v0, s0 offset:64 ; encoding: [0x00,0xc0,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_b16 v1, v2, s1
+// GFX1250: scratch_load_d16_hi_b16 v1, v2, s1 ; encoding: [0x01,0xc0,0x08,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_hi_i8 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_hi_i8 v1, off, off offset:-64 ; encoding: [0x7c,0x80,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_i8 v1, off, off offset:64
+// GFX1250: scratch_load_d16_hi_i8 v1, off, off offset:64 ; encoding: [0x7c,0x80,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_i8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_i8 v1, off, s0 offset:-64 ; encoding: [0x00,0x80,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_i8 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_hi_i8 v1, off, s0 offset:64 ; encoding: [0x00,0x80,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_i8 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_hi_i8 v1, v0, off offset:-64 ; encoding: [0x7c,0x80,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_i8 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_hi_i8 v1, v0, off offset:64 ; encoding: [0x7c,0x80,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_i8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_i8 v1, v0, s0 offset:-64 ; encoding: [0x00,0x80,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_i8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_hi_i8 v1, v0, s0 offset:64 ; encoding: [0x00,0x80,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_i8 v1, v2, s1
+// GFX1250: scratch_load_d16_hi_i8 v1, v2, s1 ; encoding: [0x01,0x80,0x08,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_hi_u8 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_hi_u8 v1, off, off offset:-64 ; encoding: [0x7c,0x40,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_u8 v1, off, off offset:64
+// GFX1250: scratch_load_d16_hi_u8 v1, off, off offset:64 ; encoding: [0x7c,0x40,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_u8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_u8 v1, off, s0 offset:-64 ; encoding: [0x00,0x40,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_u8 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_hi_u8 v1, off, s0 offset:64 ; encoding: [0x00,0x40,0x08,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_u8 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_hi_u8 v1, v0, off offset:-64 ; encoding: [0x7c,0x40,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_u8 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_hi_u8 v1, v0, off offset:64 ; encoding: [0x7c,0x40,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_u8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_hi_u8 v1, v0, s0 offset:-64 ; encoding: [0x00,0x40,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_hi_u8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_hi_u8 v1, v0, s0 offset:64 ; encoding: [0x00,0x40,0x08,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_hi_u8 v1, v2, s1
+// GFX1250: scratch_load_d16_hi_u8 v1, v2, s1 ; encoding: [0x01,0x40,0x08,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_i8 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_i8 v1, off, off offset:-64 ; encoding: [0x7c,0xc0,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_i8 v1, off, off offset:64
+// GFX1250: scratch_load_d16_i8 v1, off, off offset:64 ; encoding: [0x7c,0xc0,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_i8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_i8 v1, off, s0 offset:-64 ; encoding: [0x00,0xc0,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_i8 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_i8 v1, off, s0 offset:64 ; encoding: [0x00,0xc0,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_i8 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_i8 v1, v0, off offset:-64 ; encoding: [0x7c,0xc0,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_i8 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_i8 v1, v0, off offset:64 ; encoding: [0x7c,0xc0,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_i8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_i8 v1, v0, s0 offset:-64 ; encoding: [0x00,0xc0,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_i8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_i8 v1, v0, s0 offset:64 ; encoding: [0x00,0xc0,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_i8 v1, v2, s1
+// GFX1250: scratch_load_d16_i8 v1, v2, s1 ; encoding: [0x01,0xc0,0x07,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_d16_u8 v1, off, off offset:-64
+// GFX1250: scratch_load_d16_u8 v1, off, off offset:-64 ; encoding: [0x7c,0x80,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_u8 v1, off, off offset:64
+// GFX1250: scratch_load_d16_u8 v1, off, off offset:64 ; encoding: [0x7c,0x80,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_u8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_d16_u8 v1, off, s0 offset:-64 ; encoding: [0x00,0x80,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_u8 v1, off, s0 offset:64
+// GFX1250: scratch_load_d16_u8 v1, off, s0 offset:64 ; encoding: [0x00,0x80,0x07,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_u8 v1, v0, off offset:-64
+// GFX1250: scratch_load_d16_u8 v1, v0, off offset:-64 ; encoding: [0x7c,0x80,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_u8 v1, v0, off offset:64
+// GFX1250: scratch_load_d16_u8 v1, v0, off offset:64 ; encoding: [0x7c,0x80,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_u8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_d16_u8 v1, v0, s0 offset:-64 ; encoding: [0x00,0x80,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_d16_u8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_d16_u8 v1, v0, s0 offset:64 ; encoding: [0x00,0x80,0x07,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_d16_u8 v1, v2, s1
+// GFX1250: scratch_load_d16_u8 v1, v2, s1 ; encoding: [0x01,0x80,0x07,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_i16 v1, off, off offset:-64
+// GFX1250: scratch_load_i16 v1, off, off offset:-64 ; encoding: [0x7c,0xc0,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i16 v1, off, off offset:64
+// GFX1250: scratch_load_i16 v1, off, off offset:64 ; encoding: [0x7c,0xc0,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i16 v1, off, s0 offset:-64
+// GFX1250: scratch_load_i16 v1, off, s0 offset:-64 ; encoding: [0x00,0xc0,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i16 v1, off, s0 offset:64
+// GFX1250: scratch_load_i16 v1, off, s0 offset:64 ; encoding: [0x00,0xc0,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i16 v1, v0, off offset:-64
+// GFX1250: scratch_load_i16 v1, v0, off offset:-64 ; encoding: [0x7c,0xc0,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i16 v1, v0, off offset:64
+// GFX1250: scratch_load_i16 v1, v0, off offset:64 ; encoding: [0x7c,0xc0,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i16 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_i16 v1, v0, s0 offset:-64 ; encoding: [0x00,0xc0,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i16 v1, v0, s0 offset:64
+// GFX1250: scratch_load_i16 v1, v0, s0 offset:64 ; encoding: [0x00,0xc0,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i16 v1, v2, s1
+// GFX1250: scratch_load_i16 v1, v2, s1 ; encoding: [0x01,0xc0,0x04,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_i8 v1, off, off offset:-64
+// GFX1250: scratch_load_i8 v1, off, off offset:-64 ; encoding: [0x7c,0x40,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i8 v1, off, off offset:64
+// GFX1250: scratch_load_i8 v1, off, off offset:64 ; encoding: [0x7c,0x40,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_i8 v1, off, s0 offset:-64 ; encoding: [0x00,0x40,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i8 v1, off, s0 offset:64
+// GFX1250: scratch_load_i8 v1, off, s0 offset:64 ; encoding: [0x00,0x40,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i8 v1, v0, off offset:-64
+// GFX1250: scratch_load_i8 v1, v0, off offset:-64 ; encoding: [0x7c,0x40,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i8 v1, v0, off offset:64
+// GFX1250: scratch_load_i8 v1, v0, off offset:64 ; encoding: [0x7c,0x40,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_i8 v1, v0, s0 offset:-64 ; encoding: [0x00,0x40,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_i8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_i8 v1, v0, s0 offset:64 ; encoding: [0x00,0x40,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_i8 v1, v2, s1
+// GFX1250: scratch_load_i8 v1, v2, s1 ; encoding: [0x01,0x40,0x04,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_u16 v1, off, off offset:-64
+// GFX1250: scratch_load_u16 v1, off, off offset:-64 ; encoding: [0x7c,0x80,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u16 v1, off, off offset:64
+// GFX1250: scratch_load_u16 v1, off, off offset:64 ; encoding: [0x7c,0x80,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u16 v1, off, s0 offset:-64
+// GFX1250: scratch_load_u16 v1, off, s0 offset:-64 ; encoding: [0x00,0x80,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u16 v1, off, s0 offset:64
+// GFX1250: scratch_load_u16 v1, off, s0 offset:64 ; encoding: [0x00,0x80,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u16 v1, v0, off offset:-64
+// GFX1250: scratch_load_u16 v1, v0, off offset:-64 ; encoding: [0x7c,0x80,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u16 v1, v0, off offset:64
+// GFX1250: scratch_load_u16 v1, v0, off offset:64 ; encoding: [0x7c,0x80,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u16 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_u16 v1, v0, s0 offset:-64 ; encoding: [0x00,0x80,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u16 v1, v0, s0 offset:64
+// GFX1250: scratch_load_u16 v1, v0, s0 offset:64 ; encoding: [0x00,0x80,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u16 v1, v2, s1
+// GFX1250: scratch_load_u16 v1, v2, s1 ; encoding: [0x01,0x80,0x04,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_u8 v1, off, off offset:-64
+// GFX1250: scratch_load_u8 v1, off, off offset:-64 ; encoding: [0x7c,0x00,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u8 v1, off, off offset:64
+// GFX1250: scratch_load_u8 v1, off, off offset:64 ; encoding: [0x7c,0x00,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u8 v1, off, s0 offset:-64
+// GFX1250: scratch_load_u8 v1, off, s0 offset:-64 ; encoding: [0x00,0x00,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u8 v1, off, s0 offset:64
+// GFX1250: scratch_load_u8 v1, off, s0 offset:64 ; encoding: [0x00,0x00,0x04,0xed,0x01,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u8 v1, v0, off offset:-64
+// GFX1250: scratch_load_u8 v1, v0, off offset:-64 ; encoding: [0x7c,0x00,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u8 v1, v0, off offset:64
+// GFX1250: scratch_load_u8 v1, v0, off offset:64 ; encoding: [0x7c,0x00,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u8 v1, v0, s0 offset:-64
+// GFX1250: scratch_load_u8 v1, v0, s0 offset:-64 ; encoding: [0x00,0x00,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_u8 v1, v0, s0 offset:64
+// GFX1250: scratch_load_u8 v1, v0, s0 offset:64 ; encoding: [0x00,0x00,0x04,0xed,0x01,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_u8 v1, v2, s1
+// GFX1250: scratch_load_u8 v1, v2, s1 ; encoding: [0x01,0x00,0x04,0xed,0x01,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_block v[4:35], off, off offset:-64
+// GFX1250: scratch_load_block v[4:35], off, off offset:-64 ; encoding: [0x7c,0xc0,0x14,0xed,0x04,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_block v[4:35], off, off offset:64
+// GFX1250: scratch_load_block v[4:35], off, off offset:64 ; encoding: [0x7c,0xc0,0x14,0xed,0x04,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_block v[4:35], off, s0 offset:-64
+// GFX1250: scratch_load_block v[4:35], off, s0 offset:-64 ; encoding: [0x00,0xc0,0x14,0xed,0x04,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_block v[4:35], off, s0 offset:64
+// GFX1250: scratch_load_block v[4:35], off, s0 offset:64 ; encoding: [0x00,0xc0,0x14,0xed,0x04,0x00,0x00,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_block v[4:35], v0, off offset:-64
+// GFX1250: scratch_load_block v[4:35], v0, off offset:-64 ; encoding: [0x7c,0xc0,0x14,0xed,0x04,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_block v[4:35], v0, off offset:64
+// GFX1250: scratch_load_block v[4:35], v0, off offset:64 ; encoding: [0x7c,0xc0,0x14,0xed,0x04,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_block v[4:35], v0, s0 offset:-64
+// GFX1250: scratch_load_block v[4:35], v0, s0 offset:-64 ; encoding: [0x00,0xc0,0x14,0xed,0x04,0x00,0x02,0x00,0x00,0xc0,0xff,0xff]
+
+scratch_load_block v[4:35], v0, s0 offset:64
+// GFX1250: scratch_load_block v[4:35], v0, s0 offset:64 ; encoding: [0x00,0xc0,0x14,0xed,0x04,0x00,0x02,0x00,0x00,0x40,0x00,0x00]
+
+scratch_load_block v[4:35], v2, s1
+// GFX1250: scratch_load_block v[4:35], v2, s1 ; encoding: [0x01,0xc0,0x14,0xed,0x04,0x00,0x02,0x00,0x02,0x00,0x00,0x00]
+
+scratch_load_block v[4:35], v2, s1 th:TH_LOAD_HT scope:SCOPE_SE
+// GFX1250: scratch_load_block v[4:35], v2, s1 th:TH_LOAD_HT scope:SCOPE_SE ; encoding: [0x01,0xc0,0x14,0xed,0x04,0x00,0x26,0x00,0x02,0x00,0x00,0x00]
+
+scratch_store_b128 off, v[2:5], off offset:-64
+// GFX1250: scratch_store_b128 off, v[2:5], off offset:-64 ; encoding: [0x7c,0x40,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b128 off, v[2:5], off offset:64
+// GFX1250: scratch_store_b128 off, v[2:5], off offset:64 ; encoding: [0x7c,0x40,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b128 off, v[2:5], s0 offset:-64
+// GFX1250: scratch_store_b128 off, v[2:5], s0 offset:-64 ; encoding: [0x00,0x40,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b128 off, v[2:5], s0 offset:64
+// GFX1250: scratch_store_b128 off, v[2:5], s0 offset:64 ; encoding: [0x00,0x40,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b128 v0, v[2:5], off offset:-64
+// GFX1250: scratch_store_b128 v0, v[2:5], off offset:-64 ; encoding: [0x7c,0x40,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b128 v0, v[2:5], off offset:64
+// GFX1250: scratch_store_b128 v0, v[2:5], off offset:64 ; encoding: [0x7c,0x40,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b128 v0, v[2:5], s0 offset:-64
+// GFX1250: scratch_store_b128 v0, v[2:5], s0 offset:-64 ; encoding: [0x00,0x40,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b128 v0, v[2:5], s0 offset:64
+// GFX1250: scratch_store_b128 v0, v[2:5], s0 offset:64 ; encoding: [0x00,0x40,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b128 v1, v[2:5], s3
+// GFX1250: scratch_store_b128 v1, v[2:5], s3 ; encoding: [0x03,0x40,0x07,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_b16 off, v2, off offset:-64
+// GFX1250: scratch_store_b16 off, v2, off offset:-64 ; encoding: [0x7c,0x40,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b16 off, v2, off offset:64
+// GFX1250: scratch_store_b16 off, v2, off offset:64 ; encoding: [0x7c,0x40,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b16 off, v2, s0 offset:-64
+// GFX1250: scratch_store_b16 off, v2, s0 offset:-64 ; encoding: [0x00,0x40,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b16 off, v2, s0 offset:64
+// GFX1250: scratch_store_b16 off, v2, s0 offset:64 ; encoding: [0x00,0x40,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b16 v0, v2, off offset:-64
+// GFX1250: scratch_store_b16 v0, v2, off offset:-64 ; encoding: [0x7c,0x40,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b16 v0, v2, off offset:64
+// GFX1250: scratch_store_b16 v0, v2, off offset:64 ; encoding: [0x7c,0x40,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b16 v0, v2, s0 offset:-64
+// GFX1250: scratch_store_b16 v0, v2, s0 offset:-64 ; encoding: [0x00,0x40,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b16 v0, v2, s0 offset:64
+// GFX1250: scratch_store_b16 v0, v2, s0 offset:64 ; encoding: [0x00,0x40,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b16 v1, v2, s3
+// GFX1250: scratch_store_b16 v1, v2, s3 ; encoding: [0x03,0x40,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_b32 off, v2, off offset:2047
+// GFX1250: scratch_store_b32 off, v2, off offset:2047 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xff,0x07,0x00]
+
+scratch_store_b32 off, v2, off offset:-64
+// GFX1250: scratch_store_b32 off, v2, off offset:-64 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b32 off, v2, off offset:64
+// GFX1250: scratch_store_b32 off, v2, off offset:64 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b32 off, v2, off
+// GFX1250: scratch_store_b32 off, v2, off ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+
+scratch_store_b32 off, v2, s0 offset:-64
+// GFX1250: scratch_store_b32 off, v2, s0 offset:-64 ; encoding: [0x00,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b32 off, v2, s0 offset:64
+// GFX1250: scratch_store_b32 off, v2, s0 offset:64 ; encoding: [0x00,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b32 off, v2, s3 offset:2047
+// GFX1250: scratch_store_b32 off, v2, s3 offset:2047 ; encoding: [0x03,0x80,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xff,0x07,0x00]
+
+scratch_store_b32 v0, v2, off offset:-64
+// GFX1250: scratch_store_b32 v0, v2, off offset:-64 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b32 v0, v2, off offset:64
+// GFX1250: scratch_store_b32 v0, v2, off offset:64 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b32 v0, v2, s0 offset:-64
+// GFX1250: scratch_store_b32 v0, v2, s0 offset:-64 ; encoding: [0x00,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b32 v0, v2, s0 offset:64
+// GFX1250: scratch_store_b32 v0, v2, s0 offset:64 ; encoding: [0x00,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b32 v1, v2, off offset:2047
+// GFX1250: scratch_store_b32 v1, v2, off offset:2047 ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0xff,0x07,0x00]
+
+scratch_store_b32 v1, v2, s1 offset:-61440
+// GFX1250: scratch_store_b32 v1, v2, s1 offset:-61440 ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x10,0xff]
+
+scratch_store_b32 v1, v2, s1 offset:61440
+// GFX1250: scratch_store_b32 v1, v2, s1 offset:61440 ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0xf0,0x00]
+
+scratch_store_b32 v1, v2, s1 offset:-4095
+// GFX1250: scratch_store_b32 v1, v2, s1 offset:-4095 ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x01,0xf0,0xff]
+
+scratch_store_b32 v1, v2, s1 offset:4095
+// GFX1250: scratch_store_b32 v1, v2, s1 offset:4095 ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0xff,0x0f,0x00]
+
+scratch_store_b32 v1, v2, s3 offset:2047
+// GFX1250: scratch_store_b32 v1, v2, s3 offset:2047 ; encoding: [0x03,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0xff,0x07,0x00]
+
+scratch_store_b32 v1, v2, s3
+// GFX1250: scratch_store_b32 v1, v2, s3 ; encoding: [0x03,0x80,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_b64 off, v[2:3], off offset:-64
+// GFX1250: scratch_store_b64 off, v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b64 off, v[2:3], off offset:64
+// GFX1250: scratch_store_b64 off, v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b64 off, v[2:3], s0 offset:-64
+// GFX1250: scratch_store_b64 off, v[2:3], s0 offset:-64 ; encoding: [0x00,0xc0,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b64 off, v[2:3], s0 offset:64
+// GFX1250: scratch_store_b64 off, v[2:3], s0 offset:64 ; encoding: [0x00,0xc0,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b64 v0, v[2:3], off offset:-64
+// GFX1250: scratch_store_b64 v0, v[2:3], off offset:-64 ; encoding: [0x7c,0xc0,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b64 v0, v[2:3], off offset:64
+// GFX1250: scratch_store_b64 v0, v[2:3], off offset:64 ; encoding: [0x7c,0xc0,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b64 v0, v[2:3], s0 offset:-64
+// GFX1250: scratch_store_b64 v0, v[2:3], s0 offset:-64 ; encoding: [0x00,0xc0,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b64 v0, v[2:3], s0 offset:64
+// GFX1250: scratch_store_b64 v0, v[2:3], s0 offset:64 ; encoding: [0x00,0xc0,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b64 v1, v[2:3], s3
+// GFX1250: scratch_store_b64 v1, v[2:3], s3 ; encoding: [0x03,0xc0,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_b8 off, v2, off offset:-64
+// GFX1250: scratch_store_b8 off, v2, off offset:-64 ; encoding: [0x7c,0x00,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b8 off, v2, off offset:64
+// GFX1250: scratch_store_b8 off, v2, off offset:64 ; encoding: [0x7c,0x00,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b8 off, v2, s0 offset:-64
+// GFX1250: scratch_store_b8 off, v2, s0 offset:-64 ; encoding: [0x00,0x00,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b8 off, v2, s0 offset:64
+// GFX1250: scratch_store_b8 off, v2, s0 offset:64 ; encoding: [0x00,0x00,0x06,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b8 v0, v2, off offset:-64
+// GFX1250: scratch_store_b8 v0, v2, off offset:-64 ; encoding: [0x7c,0x00,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b8 v0, v2, off offset:64
+// GFX1250: scratch_store_b8 v0, v2, off offset:64 ; encoding: [0x7c,0x00,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b8 v0, v2, s0 offset:-64
+// GFX1250: scratch_store_b8 v0, v2, s0 offset:-64 ; encoding: [0x00,0x00,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b8 v0, v2, s0 offset:64
+// GFX1250: scratch_store_b8 v0, v2, s0 offset:64 ; encoding: [0x00,0x00,0x06,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b8 v1, v2, s3
+// GFX1250: scratch_store_b8 v1, v2, s3 ; encoding: [0x03,0x00,0x06,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_b96 off, v[2:4], off offset:-64
+// GFX1250: scratch_store_b96 off, v[2:4], off offset:-64 ; encoding: [0x7c,0x00,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b96 off, v[2:4], off offset:64
+// GFX1250: scratch_store_b96 off, v[2:4], off offset:64 ; encoding: [0x7c,0x00,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b96 off, v[2:4], s0 offset:-64
+// GFX1250: scratch_store_b96 off, v[2:4], s0 offset:-64 ; encoding: [0x00,0x00,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b96 off, v[2:4], s0 offset:64
+// GFX1250: scratch_store_b96 off, v[2:4], s0 offset:64 ; encoding: [0x00,0x00,0x07,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b96 v0, v[2:4], off offset:-64
+// GFX1250: scratch_store_b96 v0, v[2:4], off offset:-64 ; encoding: [0x7c,0x00,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b96 v0, v[2:4], off offset:64
+// GFX1250: scratch_store_b96 v0, v[2:4], off offset:64 ; encoding: [0x7c,0x00,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b96 v0, v[2:4], s0 offset:-64
+// GFX1250: scratch_store_b96 v0, v[2:4], s0 offset:-64 ; encoding: [0x00,0x00,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_b96 v0, v[2:4], s0 offset:64
+// GFX1250: scratch_store_b96 v0, v[2:4], s0 offset:64 ; encoding: [0x00,0x00,0x07,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_b96 v1, v[2:4], s3
+// GFX1250: scratch_store_b96 v1, v[2:4], s3 ; encoding: [0x03,0x00,0x07,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_d16_hi_b16 off, v2, off offset:-64
+// GFX1250: scratch_store_d16_hi_b16 off, v2, off offset:-64 ; encoding: [0x7c,0x40,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b16 off, v2, off offset:64
+// GFX1250: scratch_store_d16_hi_b16 off, v2, off offset:64 ; encoding: [0x7c,0x40,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b16 off, v2, s0 offset:-64
+// GFX1250: scratch_store_d16_hi_b16 off, v2, s0 offset:-64 ; encoding: [0x00,0x40,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b16 off, v2, s0 offset:64
+// GFX1250: scratch_store_d16_hi_b16 off, v2, s0 offset:64 ; encoding: [0x00,0x40,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b16 v0, v2, off offset:-64
+// GFX1250: scratch_store_d16_hi_b16 v0, v2, off offset:-64 ; encoding: [0x7c,0x40,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b16 v0, v2, off offset:64
+// GFX1250: scratch_store_d16_hi_b16 v0, v2, off offset:64 ; encoding: [0x7c,0x40,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b16 v0, v2, s0 offset:-64
+// GFX1250: scratch_store_d16_hi_b16 v0, v2, s0 offset:-64 ; encoding: [0x00,0x40,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b16 v0, v2, s0 offset:64
+// GFX1250: scratch_store_d16_hi_b16 v0, v2, s0 offset:64 ; encoding: [0x00,0x40,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b16 v1, v2, s3
+// GFX1250: scratch_store_d16_hi_b16 v1, v2, s3 ; encoding: [0x03,0x40,0x09,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_d16_hi_b8 off, v2, off offset:-64
+// GFX1250: scratch_store_d16_hi_b8 off, v2, off offset:-64 ; encoding: [0x7c,0x00,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b8 off, v2, off offset:64
+// GFX1250: scratch_store_d16_hi_b8 off, v2, off offset:64 ; encoding: [0x7c,0x00,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b8 off, v2, s0 offset:-64
+// GFX1250: scratch_store_d16_hi_b8 off, v2, s0 offset:-64 ; encoding: [0x00,0x00,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b8 off, v2, s0 offset:64
+// GFX1250: scratch_store_d16_hi_b8 off, v2, s0 offset:64 ; encoding: [0x00,0x00,0x09,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b8 v0, v2, off offset:-64
+// GFX1250: scratch_store_d16_hi_b8 v0, v2, off offset:-64 ; encoding: [0x7c,0x00,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b8 v0, v2, off offset:64
+// GFX1250: scratch_store_d16_hi_b8 v0, v2, off offset:64 ; encoding: [0x7c,0x00,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b8 v0, v2, s0 offset:-64
+// GFX1250: scratch_store_d16_hi_b8 v0, v2, s0 offset:-64 ; encoding: [0x00,0x00,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_d16_hi_b8 v0, v2, s0 offset:64
+// GFX1250: scratch_store_d16_hi_b8 v0, v2, s0 offset:64 ; encoding: [0x00,0x00,0x09,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_d16_hi_b8 v1, v2, s3
+// GFX1250: scratch_store_d16_hi_b8 v1, v2, s3 ; encoding: [0x03,0x00,0x09,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_block off, v[2:33], off offset:-64
+// GFX1250: scratch_store_block off, v[2:33], off offset:-64 ; encoding: [0x7c,0x00,0x15,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_block off, v[2:33], off offset:64
+// GFX1250: scratch_store_block off, v[2:33], off offset:64 ; encoding: [0x7c,0x00,0x15,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_block off, v[2:33], s0 offset:-64
+// GFX1250: scratch_store_block off, v[2:33], s0 offset:-64 ; encoding: [0x00,0x00,0x15,0xed,0x00,0x00,0x00,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_block off, v[2:33], s0 offset:64
+// GFX1250: scratch_store_block off, v[2:33], s0 offset:64 ; encoding: [0x00,0x00,0x15,0xed,0x00,0x00,0x00,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_block v0, v[2:33], off offset:-64
+// GFX1250: scratch_store_block v0, v[2:33], off offset:-64 ; encoding: [0x7c,0x00,0x15,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_block v0, v[2:33], off offset:64
+// GFX1250: scratch_store_block v0, v[2:33], off offset:64 ; encoding: [0x7c,0x00,0x15,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_block v0, v[2:33], s0 offset:-64
+// GFX1250: scratch_store_block v0, v[2:33], s0 offset:-64 ; encoding: [0x00,0x00,0x15,0xed,0x00,0x00,0x02,0x01,0x00,0xc0,0xff,0xff]
+
+scratch_store_block v0, v[2:33], s0 offset:64
+// GFX1250: scratch_store_block v0, v[2:33], s0 offset:64 ; encoding: [0x00,0x00,0x15,0xed,0x00,0x00,0x02,0x01,0x00,0x40,0x00,0x00]
+
+scratch_store_block v1, v[2:33], s3
+// GFX1250: scratch_store_block v1, v[2:33], s3 ; encoding: [0x03,0x00,0x15,0xed,0x00,0x00,0x02,0x01,0x01,0x00,0x00,0x00]
+
+scratch_store_block v1, v[2:33], s3 th:TH_STORE_HT scope:SCOPE_SE
+// GFX1250: scratch_store_block v1, v[2:33], s3 th:TH_STORE_HT scope:SCOPE_SE ; encoding: [0x03,0x00,0x15,0xed,0x00,0x00,0x26,0x01,0x01,0x00,0x00,0x00]
+
global_load_b32 v0, v[2:3], off nv
// GFX1250: global_load_b32 v0, v[2:3], off nv ; encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: nv is not supported on this GPU
@@ -285,6 +3096,216 @@ flat_load_monitor_b64 v[2:3], v2, s[4:5] offset:64 scale_offset
// GFX1250: flat_load_monitor_b64 v[2:3], v2, s[4:5] offset:64 scale_offset ; encoding: [0x04,0x40,0x1c,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+flat_atomic_add_f64 v[0:1], v[2:3] offset:4095
+// GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_add_f64 v[254:255], v[2:3] offset:4095
+// GFX1250: flat_atomic_add_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[254:255], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_add_f64 v[0:1], v[254:255] offset:4095
+// GFX1250: flat_atomic_add_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[0:1], v[254:255] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_add_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_add_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_add_f64 v[0:1], v[2:3] offset:7
+// GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[0:1], v[2:3] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[254:255], v[2:3] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[254:255], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[0:1], v[254:255] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[0:1], v[254:255] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_num_f64 v[0:1], v[2:3] offset:7
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[0:1], v[2:3] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[254:255], v[2:3] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[254:255], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[0:1], v[254:255] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[0:1], v[254:255] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_num_f64 v[0:1], v[2:3] offset:7
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[0:1], v[2:3] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[0:1], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[254:255], v[2:3] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[254:255], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[0:1], v[254:255] offset:4095
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[0:1], v[254:255] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_min_f64 v[0:1], v[2:3] offset:7
+// GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_min_f64 v[0:1], v[2:3] offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[0:1], v[2:3] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[0:1], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[254:255], v[2:3] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[254:255], v[2:3] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[0:1], v[254:255] offset:4095
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[0:1], v[254:255] offset:4095
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[0:1], v[2:3]
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[0:1], v[2:3]
+// GFX12-ERR-NEXT:{{^}}^
+
+flat_atomic_max_f64 v[0:1], v[2:3] offset:7
+// GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: flat_atomic_max_f64 v[0:1], v[2:3] offset:7
+// GFX12-ERR-NEXT:{{^}}^
+
+global_atomic_add_f64 v[0:1], v[2:3], off
+// GFX1250: global_atomic_add_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x40,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off
+// GFX12-ERR-NEXT:{{^}}^
+
+global_atomic_min_num_f64 v[0:1], v[2:3], off
+// GFX1250: global_atomic_min_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: global_atomic_min_num_f64 v[0:1], v[2:3], off
+// GFX12-ERR-NEXT:{{^}}^
+
+global_atomic_max_num_f64 v[0:1], v[2:3], off
+// GFX1250: global_atomic_max_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: global_atomic_max_num_f64 v[0:1], v[2:3], off
+// GFX12-ERR-NEXT:{{^}}^
+
+global_atomic_min_f64 v[0:1], v[2:3], off
+// GFX1250: global_atomic_min_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: global_atomic_min_f64 v[0:1], v[2:3], off
+// GFX12-ERR-NEXT:{{^}}^
+
+global_atomic_max_f64 v[0:1], v[2:3], off
+// GFX1250: global_atomic_max_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX12-ERR-NEXT: global_atomic_max_f64 v[0:1], v[2:3], off
+// GFX12-ERR-NEXT:{{^}}^
+
global_store_async_from_lds_b8 v[2:3], v1, off th:TH_STORE_BYPASS scope:SCOPE_SYS
// GFX1250: global_store_async_from_lds_b8 v[2:3], v1, off th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x18,0xee,0x00,0x00,0xbc,0x00,0x02,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
@@ -521,6 +3542,14 @@ flat_atomic_add_f32 v2, v3, s[2:3] offset:64
// GFX1250: flat_atomic_add_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+flat_atomic_add_f64 v[0:1], v3, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN
+// GFX1250: flat_atomic_add_f64 v[0:1], v3, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x15,0xec,0x00,0x00,0x11,0x01,0x03,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+flat_atomic_add_f64 v3, v[2:3], s[2:3]
+// GFX1250: flat_atomic_add_f64 v3, v[2:3], s[2:3] ; encoding: [0x02,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
flat_atomic_add_u32 v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN
// GFX1250: flat_atomic_add_u32 v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
index 789d6f8..1f40a32 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
@@ -1,6 +1,108 @@
// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+v_bitop3_b32 v5, v1, v2, s3
+// GFX1250: v_bitop3_b32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x34,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bitop3_b32 v5, v1, v2, s3 bitop3:161
+// GFX1250: v_bitop3_b32 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0x05,0x0e,0x30]
+
+v_bitop3_b32 v5, v255, s2, s105 bitop3:0x27
+// GFX1250: v_bitop3_b32 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x34,0xd6,0xff,0x05,0xa4,0xe1]
+
+v_bitop3_b32 v5, s1, v255, exec_hi bitop3:100
+// GFX1250: v_bitop3_b32 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0xfe,0xff,0x89]
+
+v_bitop3_b32 v5, s105, s105, exec_lo bitop3:0
+// GFX1250: v_bitop3_b32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x34,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bitop3_b32 v5, vcc_lo, ttmp15, v3 bitop3:0x15
+// GFX1250: v_bitop3_b32 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x34,0xd6,0x6a,0xf6,0x0c,0xa4]
+
+v_bitop3_b32 v5, vcc_hi, 0xaf123456, v255 bitop3:63
+// GFX1250: v_bitop3_b32 v5, vcc_hi, 0xaf123456, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x34,0xd6,0x6b,0xfe,0xfd,0xe7,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b32 v5, ttmp15, src_scc, ttmp15 bitop3:0x24
+// GFX1250: v_bitop3_b32 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x34,0xd6,0x7b,0xfa,0xed,0x81]
+
+v_bitop3_b32 v5, m0, 0.5, m0 bitop3:5
+// GFX1250: v_bitop3_b32 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x34,0xd6,0x7d,0xe0,0xf5,0xa1]
+
+v_bitop3_b32 v5, exec_lo, -1, vcc_hi bitop3:6
+// GFX1250: v_bitop3_b32 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x34,0xd6,0x7e,0x82,0xad,0xc1]
+
+v_bitop3_b32 v5, exec_hi, null, vcc_lo bitop3:77
+// GFX1250: v_bitop3_b32 v5, exec_hi, null, vcc_lo bitop3:0x4d ; encoding: [0x05,0x01,0x34,0xd6,0x7f,0xf8,0xa8,0xa9]
+
+v_bitop3_b32 v5, null, exec_lo, 0xaf123456 bitop3:88
+// GFX1250: v_bitop3_b32 v5, null, exec_lo, 0xaf123456 bitop3:0x58 ; encoding: [0x05,0x03,0x34,0xd6,0x7c,0xfc,0xfc,0x0b,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b32 v5, -1, exec_hi, src_scc bitop3:99
+// GFX1250: v_bitop3_b32 v5, -1, exec_hi, src_scc bitop3:0x63 ; encoding: [0x05,0x04,0x34,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+v_bitop3_b32 v5, 0.5, m0, 0.5 bitop3:101
+// GFX1250: v_bitop3_b32 v5, 0.5, m0, 0.5 bitop3:0x65 ; encoding: [0x05,0x04,0x34,0xd6,0xf0,0xfa,0xc0,0xab]
+
+v_bitop3_b32 v5, src_scc, vcc_lo, -1 bitop3:102
+// GFX1250: v_bitop3_b32 v5, src_scc, vcc_lo, -1 bitop3:0x66 ; encoding: [0x05,0x04,0x34,0xd6,0xfd,0xd4,0x04,0xcb]
+
+v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:103
+// GFX1250: v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:0x67 ; encoding: [0xff,0x04,0x34,0xd6,0xff,0xd6,0xf0,0xe9,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b16 v5, v1, v2, s3
+// GFX1250: v_bitop3_b16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bitop3_b16 v5, v1, v2, s3 bitop3:161
+// GFX1250: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
+
+v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27
+// GFX1250: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
+
+v_bitop3_b16 v5, s1, v255, exec_hi bitop3:100
+// GFX1250: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
+
+v_bitop3_b16 v5, s105, s105, exec_lo bitop3:0
+// GFX1250: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15
+// GFX1250: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
+
+v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:63
+// GFX1250: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24
+// GFX1250: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
+
+v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5
+// GFX1250: v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1]
+
+v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6
+// GFX1250: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
+
+v_bitop3_b16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX1250: v_bitop3_b16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bitop3_b16 v5, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1]
+// GFX1250: v_bitop3_b16 v5, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1] ; encoding: [0x05,0x7e,0x33,0xd6,0x7f,0xf8,0xa8,0xe9]
+
+v_bitop3_b16 v5, null, exec_lo, 0xfe0b bitop3:0x88 op_sel:[0,0,0,0]
+// GFX1250: v_bitop3_b16 v5, null, exec_lo, 0xfe0b bitop3:0x88 ; encoding: [0x05,0x01,0x33,0xd6,0x7c,0xfc,0xfc,0x13,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v5, -1, exec_hi, src_scc bitop3:99 op_sel:[1,0,0,0]
+// GFX1250: v_bitop3_b16 v5, -1, exec_hi, src_scc bitop3:0x63 op_sel:[1,0,0,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+v_bitop3_b16 v5, 0.5, m0, 0.5 bitop3:101 op_sel:[0,1,0,0]
+// GFX1250: v_bitop3_b16 v5, 0.5, m0, 0.5 bitop3:0x65 op_sel:[0,1,0,0] ; encoding: [0x05,0x14,0x33,0xd6,0xf0,0xfa,0xc0,0xab]
+
+v_bitop3_b16 v5, src_scc, vcc_lo, -1 bitop3:102 op_sel:[0,0,1,0]
+// GFX1250: v_bitop3_b16 v5, src_scc, vcc_lo, -1 bitop3:0x66 op_sel:[0,0,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xfd,0xd4,0x04,0xcb]
+
+v_bitop3_b16 v255, 0xfe0b, vcc_hi, null bitop3:103 op_sel:[0,0,0,1]
+// GFX1250: v_bitop3_b16 v255, 0xfe0b, vcc_hi, null bitop3:0x67 op_sel:[0,0,0,1] ; encoding: [0xff,0x44,0x33,0xd6,0xff,0xd6,0xf0,0xe9,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v1, v2, v3, v4 bitop3:103 op_sel:[1,1,1,1]
+// GFX1250: v_bitop3_b16 v1, v2, v3, v4 bitop3:0x67 op_sel:[1,1,1,1] ; encoding: [0x01,0x7c,0x33,0xd6,0x02,0x07,0x12,0xec]
+
v_lshl_add_u64 v[2:3], s[4:5], v7, v[8:9]
// GFX1250: v_lshl_add_u64 v[2:3], s[4:5], v7, v[8:9] ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0e,0x22,0x04]
@@ -16,6 +118,165 @@ v_lshl_add_u64 v[2:3], s[4:5], 4, v[2:3]
v_lshl_add_u64 v[2:3], v[4:5], v7, 12345
// GFX1250: v_lshl_add_u64 v[2:3], v[4:5], v7, 0x3039 ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+v_mad_u32 v2, s4, v7, v8
+// GFX1250: v_mad_u32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_u32 v2, v4, 0, 1
+// GFX1250: v_mad_u32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_u32 v2, v4, 3, s2
+// GFX1250: v_mad_u32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_u32 v2, s4, 4, v2
+// GFX1250: v_mad_u32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_u32 v2, v4, v7, 12345
+// GFX1250: v_mad_u32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_max_i64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_max_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x0c,0x02,0x00]
+
+v_max_i64 v[2:3], v[4:5], 1
+// GFX1250: v_max_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x03,0x01,0x00]
+
+v_max_i64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_max_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x05,0x00,0x00]
+
+v_max_i64 v[2:3], v[4:5], 12345
+// GFX1250: v_max_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_max_u64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_max_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x0c,0x02,0x00]
+
+v_max_u64 v[2:3], v[4:5], 1
+// GFX1250: v_max_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x03,0x01,0x00]
+
+v_max_u64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_max_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x05,0x00,0x00]
+
+v_max_u64 v[2:3], v[4:5], 12345
+// GFX1250: v_max_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_min_i64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_min_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x0c,0x02,0x00]
+
+v_min_i64 v[2:3], v[4:5], 1
+// GFX1250: v_min_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x03,0x01,0x00]
+
+v_min_i64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_min_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x05,0x00,0x00]
+
+v_min_i64 v[2:3], v[4:5], 12345
+// GFX1250: v_min_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_min_u64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_min_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x0c,0x02,0x00]
+
+v_min_u64 v[2:3], v[4:5], 1
+// GFX1250: v_min_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x03,0x01,0x00]
+
+v_min_u64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_min_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x05,0x00,0x00]
+
+v_min_u64 v[2:3], v[4:5], 12345
+// GFX1250: v_min_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_u64_u32 v[2:3], v4, 0, 1
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_nc_u64_u32 v[2:3], v4, 3, s[2:3]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, 4, v[2:3]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_nc_u64_u32 v[2:3], v4, v7, 12345
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] clamp
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_i64_i32 v[2:3], v4, 0, 1
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_nc_i64_i32 v[2:3], v4, 3, s[2:3]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_nc_i64_i32 v[2:3], s4, 4, v[2:3]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -60,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16
+
+v_cvt_pk_bf8_f16 v1, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16
+
+v_cvt_pk_fp8_f16 v1, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1]
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
index e1165fa..03f642d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
@@ -1,6 +1,108 @@
// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+v_bitop3_b32 v5, v1, v2, s3
+// GFX1250: v_bitop3_b32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x34,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bitop3_b32 v5, v1, v2, s3 bitop3:161
+// GFX1250: v_bitop3_b32 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0x05,0x0e,0x30]
+
+v_bitop3_b32 v5, v255, s2, s105 bitop3:0x27
+// GFX1250: v_bitop3_b32 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x34,0xd6,0xff,0x05,0xa4,0xe1]
+
+v_bitop3_b32 v5, s1, v255, exec_hi bitop3:100
+// GFX1250: v_bitop3_b32 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0xfe,0xff,0x89]
+
+v_bitop3_b32 v5, s105, s105, exec_lo bitop3:0
+// GFX1250: v_bitop3_b32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x34,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bitop3_b32 v5, vcc_lo, ttmp15, v3 bitop3:0x15
+// GFX1250: v_bitop3_b32 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x34,0xd6,0x6a,0xf6,0x0c,0xa4]
+
+v_bitop3_b32 v5, vcc_hi, 0xaf123456, v255 bitop3:63
+// GFX1250: v_bitop3_b32 v5, vcc_hi, 0xaf123456, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x34,0xd6,0x6b,0xfe,0xfd,0xe7,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b32 v5, ttmp15, src_scc, ttmp15 bitop3:0x24
+// GFX1250: v_bitop3_b32 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x34,0xd6,0x7b,0xfa,0xed,0x81]
+
+v_bitop3_b32 v5, m0, 0.5, m0 bitop3:5
+// GFX1250: v_bitop3_b32 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x34,0xd6,0x7d,0xe0,0xf5,0xa1]
+
+v_bitop3_b32 v5, exec_lo, -1, vcc_hi bitop3:6
+// GFX1250: v_bitop3_b32 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x34,0xd6,0x7e,0x82,0xad,0xc1]
+
+v_bitop3_b32 v5, exec_hi, null, vcc_lo bitop3:77
+// GFX1250: v_bitop3_b32 v5, exec_hi, null, vcc_lo bitop3:0x4d ; encoding: [0x05,0x01,0x34,0xd6,0x7f,0xf8,0xa8,0xa9]
+
+v_bitop3_b32 v5, null, exec_lo, 0xaf123456 bitop3:88
+// GFX1250: v_bitop3_b32 v5, null, exec_lo, 0xaf123456 bitop3:0x58 ; encoding: [0x05,0x03,0x34,0xd6,0x7c,0xfc,0xfc,0x0b,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b32 v5, -1, exec_hi, src_scc bitop3:99
+// GFX1250: v_bitop3_b32 v5, -1, exec_hi, src_scc bitop3:0x63 ; encoding: [0x05,0x04,0x34,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+v_bitop3_b32 v5, 0.5, m0, 0.5 bitop3:101
+// GFX1250: v_bitop3_b32 v5, 0.5, m0, 0.5 bitop3:0x65 ; encoding: [0x05,0x04,0x34,0xd6,0xf0,0xfa,0xc0,0xab]
+
+v_bitop3_b32 v5, src_scc, vcc_lo, -1 bitop3:102
+// GFX1250: v_bitop3_b32 v5, src_scc, vcc_lo, -1 bitop3:0x66 ; encoding: [0x05,0x04,0x34,0xd6,0xfd,0xd4,0x04,0xcb]
+
+v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:103
+// GFX1250: v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:0x67 ; encoding: [0xff,0x04,0x34,0xd6,0xff,0xd6,0xf0,0xe9,0x56,0x34,0x12,0xaf]
+
+v_bitop3_b16 v5.l, v1.l, v2.l, s3
+// GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bitop3_b16 v5, v1, v2, s3 bitop3:161
+// GFX1250: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
+
+v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27
+// GFX1250: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
+
+v_bitop3_b16 v5, s1, v255, exec_hi bitop3:100
+// GFX1250: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
+
+v_bitop3_b16 v5, s105, s105, exec_lo bitop3:0
+// GFX1250: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15
+// GFX1250: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
+
+v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:63
+// GFX1250: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24
+// GFX1250: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
+
+v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5
+// GFX1250: v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1]
+
+v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6
+// GFX1250: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
+
+v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX1250: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bitop3_b16 v5.h, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1]
+// GFX1250: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1] ; encoding: [0x05,0x7e,0x33,0xd6,0x7f,0xf8,0xa8,0xe9]
+
+v_bitop3_b16 v5.l, null, exec_lo, 0xfe0b bitop3:0x88
+// GFX1250: v_bitop3_b16 v5.l, null, exec_lo, 0xfe0b bitop3:0x88 ; encoding: [0x05,0x01,0x33,0xd6,0x7c,0xfc,0xfc,0x13,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v5.l, -1, exec_hi, src_scc bitop3:99
+// GFX1250: v_bitop3_b16 v5.l, -1, exec_hi, src_scc bitop3:0x63 ; encoding: [0x05,0x04,0x33,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+v_bitop3_b16 v5.l, 0.5, m0, 0.5 bitop3:101 op_sel:[0,1,0,0]
+// GFX1250: v_bitop3_b16 v5.l, 0.5, m0, 0.5 bitop3:0x65 op_sel:[0,1,0,0] ; encoding: [0x05,0x14,0x33,0xd6,0xf0,0xfa,0xc0,0xab]
+
+v_bitop3_b16 v5.l, src_scc, vcc_lo, -1 bitop3:102 op_sel:[0,0,1,0]
+// GFX1250: v_bitop3_b16 v5.l, src_scc, vcc_lo, -1 bitop3:0x66 op_sel:[0,0,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xfd,0xd4,0x04,0xcb]
+
+v_bitop3_b16 v255.h, 0xfe0b, vcc_hi, null bitop3:103 op_sel:[0,0,0,1]
+// GFX1250: v_bitop3_b16 v255.h, 0xfe0b, vcc_hi, null bitop3:0x67 op_sel:[0,0,0,1] ; encoding: [0xff,0x44,0x33,0xd6,0xff,0xd6,0xf0,0xe9,0x0b,0xfe,0x00,0x00]
+
+v_bitop3_b16 v1.h, v2.h, v3.h, v4.h bitop3:103
+// GFX1250: v_bitop3_b16 v1.h, v2.h, v3.h, v4.h bitop3:0x67 op_sel:[1,1,1,1] ; encoding: [0x01,0x7c,0x33,0xd6,0x02,0x07,0x12,0xec]
+
v_lshl_add_u64 v[2:3], s[4:5], v7, v[8:9]
// GFX1250: v_lshl_add_u64 v[2:3], s[4:5], v7, v[8:9] ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0e,0x22,0x04]
@@ -16,6 +118,165 @@ v_lshl_add_u64 v[2:3], s[4:5], 4, v[2:3]
v_lshl_add_u64 v[2:3], v[4:5], v7, 12345
// GFX1250: v_lshl_add_u64 v[2:3], v[4:5], v7, 0x3039 ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+v_mad_u32 v2, s4, v7, v8
+// GFX1250: v_mad_u32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_u32 v2, v4, 0, 1
+// GFX1250: v_mad_u32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_u32 v2, v4, 3, s2
+// GFX1250: v_mad_u32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_u32 v2, s4, 4, v2
+// GFX1250: v_mad_u32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_u32 v2, v4, v7, 12345
+// GFX1250: v_mad_u32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_max_i64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_max_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x0c,0x02,0x00]
+
+v_max_i64 v[2:3], v[4:5], 1
+// GFX1250: v_max_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x03,0x01,0x00]
+
+v_max_i64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_max_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x05,0x00,0x00]
+
+v_max_i64 v[2:3], v[4:5], 12345
+// GFX1250: v_max_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_max_u64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_max_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x0c,0x02,0x00]
+
+v_max_u64 v[2:3], v[4:5], 1
+// GFX1250: v_max_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x03,0x01,0x00]
+
+v_max_u64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_max_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x05,0x00,0x00]
+
+v_max_u64 v[2:3], v[4:5], 12345
+// GFX1250: v_max_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_min_i64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_min_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x0c,0x02,0x00]
+
+v_min_i64 v[2:3], v[4:5], 1
+// GFX1250: v_min_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x03,0x01,0x00]
+
+v_min_i64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_min_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x05,0x00,0x00]
+
+v_min_i64 v[2:3], v[4:5], 12345
+// GFX1250: v_min_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_min_u64 v[2:3], s[4:5], v[6:7]
+// GFX1250: v_min_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x0c,0x02,0x00]
+
+v_min_u64 v[2:3], v[4:5], 1
+// GFX1250: v_min_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x03,0x01,0x00]
+
+v_min_u64 v[2:3], v[4:5], s[2:3]
+// GFX1250: v_min_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x05,0x00,0x00]
+
+v_min_u64 v[2:3], v[4:5], 12345
+// GFX1250: v_min_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_u64_u32 v[2:3], v4, 0, 1
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_nc_u64_u32 v[2:3], v4, 3, s[2:3]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, 4, v[2:3]
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_nc_u64_u32 v[2:3], v4, v7, 12345
+// GFX1250: v_mad_nc_u64_u32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] clamp
+// GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+v_mad_nc_i64_i32 v[2:3], v4, 0, 1
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x01,0x05,0x02]
+
+v_mad_nc_i64_i32 v[2:3], v4, 3, s[2:3]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x07,0x09,0x00]
+
+v_mad_nc_i64_i32 v[2:3], s4, 4, v[2:3]
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x08,0x09,0x04]
+
+v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
+// GFX1250: v_mad_nc_i64_i32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
+// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -60,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v0.l, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v0.l, v2 clamp ; encoding: [0x00,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16
+
+v_cvt_pk_bf8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16
+
+v_cvt_pk_fp8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
index bc910b9..a926f7e 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
@@ -2,6 +2,198 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:100 row_half_mirror
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xfe,0x8f,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0 row_shl:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x15 row_shl:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x15 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x34,0xd6,0xfa,0x04,0xae,0xa1,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:63 row_shr:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x24 row_shr:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:5 row_ror:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:6 row_ror:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x34,0xd6,0xfa,0x04,0xf2,0xa9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x34,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xc2,0x6b,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x34,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:100 row_half_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0 row_shl:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:63 row_shr:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x33,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:102 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x66 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0xfe,0xc9,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:103 op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x67 op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x0c,0x33,0xd6,0xfa,0x04,0xfa,0xe9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:104 op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x68 op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x15,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:104 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x68 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x25,0x33,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x60,0x01,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:104 op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x68 op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x45,0x33,0xd6,0xfa,0xfe,0xf7,0x0b,0xff,0x6f,0x0d,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -57,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:40: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:61: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
index 3bb84e2..f766e52 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
@@ -2,6 +2,198 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:100 row_half_mirror
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xfe,0x8f,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0 row_shl:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x15 row_shl:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x15 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x34,0xd6,0xfa,0x04,0xae,0xa1,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:63 row_shr:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x24 row_shr:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:5 row_ror:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:6 row_ror:15
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x34,0xd6,0xfa,0x04,0xf2,0xa9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x34,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xc2,0x6b,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x34,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:100 row_half_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0 row_shl:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:63 row_shr:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x33,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:102 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:0x66 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0xfe,0xc9,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:103 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:0x67 op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x0c,0x33,0xd6,0xfa,0x04,0xfa,0xe9,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null bitop3:104 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null bitop3:0x68 op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x15,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:104 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x68 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x25,0x33,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x60,0x01,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:104 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:0x68 op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x45,0x33,0xd6,0xfa,0xfe,0xf7,0x0b,0xff,0x6f,0x0d,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -57,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
index f48445f..3b864b9 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
@@ -2,6 +2,158 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:161 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:100 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x34,0xd6,0xe9,0x04,0xee,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:63 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x34,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:77 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x4d dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x01,0x34,0xd6,0xea,0x04,0xc2,0xab,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:88 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x58 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x34,0xd6,0xe9,0xfe,0xf7,0x0b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:161 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:100 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:0xf dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x33,0xd6,0xe9,0x04,0xee,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:63 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x33,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x33,0xd6,0xea,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:77 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x4d dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x01,0x33,0xd6,0xe9,0xfe,0xf7,0xab,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x33,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:88 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x58 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x7b,0x33,0xd6,0xe9,0x04,0xfe,0x09,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:99 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x63 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xe9,0x04,0xfa,0x69,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x10,0x33,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:102 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x66 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xe9,0x04,0x06,0xcb,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:103 op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x67 op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x44,0x33,0xd6,0xea,0xfe,0xf7,0xeb,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -17,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:58: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
index d7a95f4..c726a0d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
@@ -2,6 +2,158 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+v_bitop3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:161 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:100 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x34,0xd6,0xe9,0x04,0xee,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:63 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x34,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:77 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x4d dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x01,0x34,0xd6,0xea,0x04,0xc2,0xab,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:88 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x58 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x34,0xd6,0xe9,0xfe,0xf7,0x0b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:161 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:100 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:0xf dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x33,0xd6,0xe9,0x04,0xee,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:63 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x33,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x33,0xd6,0xea,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:77 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x4d dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x01,0x33,0xd6,0xe9,0xfe,0xf7,0xab,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x33,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:88 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:0x58 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x7b,0x33,0xd6,0xe9,0x04,0xfe,0x09,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:99 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:0x63 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xe9,0x04,0xfa,0x69,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x10,0x33,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:102 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x66 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xe9,0x04,0x06,0xcb,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:103 dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:0x67 op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x44,0x33,0xd6,0xea,0xfe,0xf7,0xeb,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 dpp8:[0,0,0,0,0,0,0,0]
+// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -17,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
index e2fafe4..c5bd00c 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
@@ -5,7 +5,159 @@ v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_max_i64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_max_i64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_max_u64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_max_u64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_min_i64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_min_i64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_min_u64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_min_u64 v[2:3], v[4:5], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_nc_u64_u32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_nc_u64_u32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_max_i64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_max_i64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_max_u64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_max_u64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_min_i64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_min_i64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_min_u64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_min_u64 v[2:3], v[4:5], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_nc_u64_u32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_nc_u64_u32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid scale_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid byte_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
new file mode 100644
index 0000000..b7d93e1
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
@@ -0,0 +1,20 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --unique --sort --version 5
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefix=GFX12 --implicit-check-not=error: %s
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
index e03c432..0870aa7 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
@@ -17,3 +17,36 @@
# GFX1250: ds_atomic_barrier_arrive_rtn_b64 v[2:3], v2, v[4:5] offset:513 ; encoding: [0x01,0x02,0xd4,0xd9,0x02,0x04,0x00,0x02]
0x01,0x02,0xd4,0xd9,0x02,0x04,0x00,0x02
+
+# GFX1250: ds_add_f64 v1, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0x01,0xfe,0x00,0x00]
+0xff,0xff,0x50,0xd9,0x01,0xfe,0x00,0x00
+
+# GFX1250: ds_add_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x50,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x50,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f64 v1, v[2:3] offset:4 ; encoding: [0x04,0x00,0x50,0xd9,0x01,0x02,0x00,0x00]
+0x04,0x00,0x50,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x50,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f64 v255, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x50,0xd9,0xff,0x02,0x00,0x00]
+0xff,0xff,0x50,0xd9,0xff,0x02,0x00,0x00
+
+# GFX1250: ds_add_rtn_f64 v[254:255], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0xfe]
+0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0xfe
+
+# GFX1250: ds_add_rtn_f64 v[4:5], v1, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0xfe,0x00,0x04]
+0xff,0xff,0xd0,0xd9,0x01,0xfe,0x00,0x04
+
+# GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] ; encoding: [0x00,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04]
+0x00,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04
+
+# GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:4 ; encoding: [0x04,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04]
+0x04,0x00,0xd0,0xd9,0x01,0x02,0x00,0x04
+
+# GFX1250: ds_add_rtn_f64 v[4:5], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0x04]
+0xff,0xff,0xd0,0xd9,0x01,0x02,0x00,0x04
+
+# GFX1250: ds_add_rtn_f64 v[4:5], v255, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd9,0xff,0x02,0x00,0x04]
+0xff,0xff,0xd0,0xd9,0xff,0x02,0x00,0x04
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
index a2f1211..2499225 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
@@ -8,3 +8,93 @@
# GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:4095 nv ; encoding: [0x84,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0x0f,0x00]
0x84,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x40,0x15,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x65,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x7d,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x15,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x65,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x7d,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x16,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[12:15], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x18,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[96:99], s3 offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x03,0x00,0x17,0xc4,0x04,0xc0,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s101 offset:4095 ; encoding: [0x65,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x65,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], m0 offset:4095 ; encoding: [0x7d,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00]
+0x7d,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 idxen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x80,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], v0, s[8:11], s3 offen offset:4095 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x40,0x00,0xff,0x0f,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f64 v[4:5], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x17,0xc4,0x04,0x10,0x80,0x00,0x00,0x07,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt
index 291192b..8149dea 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt
@@ -6,6 +6,12 @@
# GFX1250: flat_atomic_add_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00]
0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00
+# GFX1250: flat_atomic_add_f64 v3, v[2:3], s[2:3] ; encoding: [0x02,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+0x02,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_add_f64 v[0:1], v3, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x15,0xec,0x00,0x00,0x11,0x01,0x03,0x00,0x00,0x00]
+0x02,0x40,0x15,0xec,0x00,0x00,0x11,0x01,0x03,0x00,0x00,0x00
+
# GFX1250: flat_atomic_add_u32 v0, v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff]
0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff
@@ -3177,6 +3183,75 @@
# GFX1250: global_load_monitor_b64 v[2:3], v2, s[4:5] offset:64 scale_offset ; encoding: [0x04,0x40,0x1c,0xee,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00]
0x04,0x40,0x1c,0xee,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00
+# GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_add_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_add_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_add_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+0x7c,0x40,0x15,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_min_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+0x7c,0xc0,0x16,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[254:255], v[2:3] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0xfe,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[0:1], v[254:255] offset:4095 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x7f,0x00,0xff,0x0f,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: flat_atomic_max_num_f64 v[0:1], v[2:3] offset:7 ; encoding: [0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00]
+0x7c,0x00,0x17,0xec,0x00,0x00,0x00,0x01,0x00,0x07,0x00,0x00
+
+# GFX1250: global_atomic_add_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x40,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x40,0x15,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: global_atomic_min_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: global_atomic_max_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: global_atomic_min_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x16,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
+# GFX1250: global_atomic_max_num_f64 v[0:1], v[2:3], off ; encoding: [0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x17,0xee,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00
+
# GFX1250: global_load_async_to_lds_b128 v1, v[2:3], off offset:64 ; encoding: [0x7c,0x80,0x18,0xee,0x01,0x00,0x00,0x00,0x02,0x40,0x00,0x00]
0x7c,0x80,0x18,0xee,0x01,0x00,0x00,0x00,0x02,0x40,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
index a1a1d0c..ce8cfcb 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
@@ -2,6 +2,126 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250,GFX1250-REAL16 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+0xff,0x44,0x33,0xd6,0xff,0xd6,0xf0,0xe9,0x0b,0xfe,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16 v255.h, 0xfe0b, vcc_hi, null bitop3:0x67 op_sel:[0,0,0,1] ; encoding: [0xff,0x44,0x33,0xd6,0xff,0xd6,0xf0,0xe9,0x0b,0xfe,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v255, 0xfe0b, vcc_hi, null bitop3:0x67 op_sel:[0,0,0,1] ; encoding: [0xff,0x44,0x33,0xd6,0xff,0xd6,0xf0,0xe9,0x0b,0xfe,0x00,0x00]
+
+0x05,0x0c,0x33,0xd6,0xc1,0xfe,0xf4,0x6b
+# GFX1250-REAL16: v_bitop3_b16 v5.l, -1, exec_hi, src_scc bitop3:0x63 op_sel:[1,0,0,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xc1,0xfe,0xf4,0x6b]
+# GFX1250-FAKE16: v_bitop3_b16 v5, -1, exec_hi, src_scc bitop3:0x63 op_sel:[1,0,0,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+0x05,0x14,0x33,0xd6,0xff,0xfa,0xfc,0xab,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16 v5.l, 0x3800, m0, 0x3800 bitop3:0x65 op_sel:[0,1,0,0] ; encoding: [0x05,0x14,0x33,0xd6,0xff,0xfa,0xfc,0xab,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v5, 0x3800, m0, 0x3800 bitop3:0x65 op_sel:[0,1,0,0] ; encoding: [0x05,0x14,0x33,0xd6,0xff,0xfa,0xfc,0xab,0x00,0x38,0x00,0x00]
+
+0x05,0x7e,0x33,0xd6,0x7f,0xf8,0xa8,0xe9
+# GFX1250-REAL16: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1] ; encoding: [0x05,0x7e,0x33,0xd6,0x7f,0xf8,0xa8,0xe9]
+# GFX1250-FAKE16: v_bitop3_b16 v5, exec_hi, null, vcc_lo bitop3:0x77 op_sel:[1,1,1,1] ; encoding: [0x05,0x7e,0x33,0xd6,0x7f,0xf8,0xa8,0xe9]
+
+0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01
+# GFX1250-REAL16: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01]
+# GFX1250-FAKE16: v_bitop3_b16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01]
+
+0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1
+# GFX1250-REAL16: v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
+# GFX1250-FAKE16: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
+
+0x05,0x00,0x33,0xd6,0x7d,0xfe,0xf5,0xa1,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16 v5.l, m0, 0x3800, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xfe,0xf5,0xa1,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v5, m0, 0x3800, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xfe,0xf5,0xa1,0x00,0x38,0x00,0x00]
+
+0x05,0x01,0x33,0xd6,0x7c,0xfc,0xfc,0x13,0x0b,0xfe,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16 v5.l, null, exec_lo, 0xfe0b bitop3:0x88 ; encoding: [0x05,0x01,0x33,0xd6,0x7c,0xfc,0xfc,0x13,0x0b,0xfe,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v5, null, exec_lo, 0xfe0b bitop3:0x88 ; encoding: [0x05,0x01,0x33,0xd6,0x7c,0xfc,0xfc,0x13,0x0b,0xfe,0x00,0x00]
+
+0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89
+# GFX1250-REAL16: v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
+# GFX1250-FAKE16: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
+
+0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01
+# GFX1250-REAL16: v_bitop3_b16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
+# GFX1250-FAKE16: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
+
+0x05,0x24,0x33,0xd6,0xfd,0xd4,0x04,0xcb
+# GFX1250-REAL16: v_bitop3_b16 v5.l, src_scc, vcc_lo, -1 bitop3:0x66 op_sel:[0,0,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xfd,0xd4,0x04,0xcb]
+# GFX1250-FAKE16: v_bitop3_b16 v5, src_scc, vcc_lo, -1 bitop3:0x66 op_sel:[0,0,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xfd,0xd4,0x04,0xcb]
+
+0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81
+# GFX1250-REAL16: v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
+# GFX1250-FAKE16: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
+
+0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00
+# GFX1250-REAL16: v_bitop3_b16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00]
+
+0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30
+# GFX1250-REAL16: v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
+# GFX1250-FAKE16: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
+
+0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1
+# GFX1250-REAL16: v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
+# GFX1250-FAKE16: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
+
+0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
+
+0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4
+# GFX1250-REAL16: v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
+# GFX1250-FAKE16: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
+
+0x01,0x7c,0x33,0xd6,0x02,0x07,0x12,0xec
+# GFX1250-REAL16: v_bitop3_b16 v1.h, v2.h, v3.h, v4.h bitop3:0x67 op_sel:[1,1,1,1] ; encoding: [0x01,0x7c,0x33,0xd6,0x02,0x07,0x12,0xec]
+# GFX1250-FAKE16: v_bitop3_b16 v1, v2, v3, v4 bitop3:0x67 op_sel:[1,1,1,1] ; encoding: [0x01,0x7c,0x33,0xd6,0x02,0x07,0x12,0xec]
+
+0xff,0x04,0x34,0xd6,0xff,0xd6,0xf0,0xe9,0x56,0x34,0x12,0xaf
+# GFX1250: v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:0x67 ; encoding: [0xff,0x04,0x34,0xd6,0xff,0xd6,0xf0,0xe9,0x56,0x34,0x12,0xaf]
+
+0x05,0x04,0x34,0xd6,0xc1,0xfe,0xf4,0x6b
+# GFX1250: v_bitop3_b32 v5, -1, exec_hi, src_scc bitop3:0x63 ; encoding: [0x05,0x04,0x34,0xd6,0xc1,0xfe,0xf4,0x6b]
+
+0x05,0x04,0x34,0xd6,0xf0,0xfa,0xc0,0xab
+# GFX1250: v_bitop3_b32 v5, 0.5, m0, 0.5 bitop3:0x65 ; encoding: [0x05,0x04,0x34,0xd6,0xf0,0xfa,0xc0,0xab]
+
+0x05,0x01,0x34,0xd6,0x7f,0xf8,0xa8,0xa9
+# GFX1250: v_bitop3_b32 v5, exec_hi, null, vcc_lo bitop3:0x4d ; encoding: [0x05,0x01,0x34,0xd6,0x7f,0xf8,0xa8,0xa9]
+
+0x05,0x00,0x34,0xd6,0x7e,0x82,0xad,0xc1
+# GFX1250: v_bitop3_b32 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x34,0xd6,0x7e,0x82,0xad,0xc1]
+
+0x05,0x00,0x34,0xd6,0x7d,0xe0,0xf5,0xa1
+# GFX1250: v_bitop3_b32 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x34,0xd6,0x7d,0xe0,0xf5,0xa1]
+
+0x05,0x03,0x34,0xd6,0x7c,0xfc,0xfc,0x0b,0x56,0x34,0x12,0xaf
+# GFX1250: v_bitop3_b32 v5, null, exec_lo, 0xaf123456 bitop3:0x58 ; encoding: [0x05,0x03,0x34,0xd6,0x7c,0xfc,0xfc,0x0b,0x56,0x34,0x12,0xaf]
+
+0x05,0x04,0x34,0xd6,0x01,0xfe,0xff,0x89
+# GFX1250: v_bitop3_b32 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0xfe,0xff,0x89]
+
+0x05,0x00,0x34,0xd6,0x69,0xd2,0xf8,0x01
+# GFX1250: v_bitop3_b32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x34,0xd6,0x69,0xd2,0xf8,0x01]
+
+0x05,0x04,0x34,0xd6,0xfd,0xd4,0x04,0xcb
+# GFX1250: v_bitop3_b32 v5, src_scc, vcc_lo, -1 bitop3:0x66 ; encoding: [0x05,0x04,0x34,0xd6,0xfd,0xd4,0x04,0xcb]
+
+0x05,0x04,0x34,0xd6,0x7b,0xfa,0xed,0x81
+# GFX1250: v_bitop3_b32 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x34,0xd6,0x7b,0xfa,0xed,0x81]
+
+0x05,0x00,0x34,0xd6,0x01,0x05,0x0e,0x00
+# GFX1250: v_bitop3_b32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x34,0xd6,0x01,0x05,0x0e,0x00]
+
+0x05,0x04,0x34,0xd6,0x01,0x05,0x0e,0x30
+# GFX1250: v_bitop3_b32 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x34,0xd6,0x01,0x05,0x0e,0x30]
+
+0x05,0x04,0x34,0xd6,0xff,0x05,0xa4,0xe1
+# GFX1250: v_bitop3_b32 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x34,0xd6,0xff,0x05,0xa4,0xe1]
+
+0x05,0x07,0x34,0xd6,0x6b,0xfe,0xfd,0xe7,0x56,0x34,0x12,0xaf
+# GFX1250: v_bitop3_b32 v5, vcc_hi, 0xaf123456, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x34,0xd6,0x6b,0xfe,0xfd,0xe7,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x34,0xd6,0x6a,0xf6,0x0c,0xa4
+# GFX1250: v_bitop3_b32 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x34,0xd6,0x6a,0xf6,0x0c,0xa4]
+
0x02,0x00,0x52,0xd6,0x04,0x0e,0x22,0x04
# GFX1250: v_lshl_add_u64 v[2:3], s[4:5], v7, v[8:9] ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0e,0x22,0x04]
@@ -17,6 +137,165 @@
0x02,0x00,0x52,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
# GFX1250: v_lshl_add_u64 v[2:3], v[4:5], v7, 0x3039 ; encoding: [0x02,0x00,0x52,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+0x02,0x00,0x35,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_mad_u32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x35,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_mad_u32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x35,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_mad_u32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x35,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_mad_u32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x35,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_mad_u32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x35,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x1b,0xd7,0x04,0x0c,0x02,0x00
+# GFX1250: v_max_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x0c,0x02,0x00]
+
+0x02,0x00,0x1b,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00
+# GFX1250: v_max_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x1b,0xd7,0x04,0x03,0x01,0x00
+# GFX1250: v_max_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x03,0x01,0x00]
+
+0x02,0x00,0x1b,0xd7,0x04,0x05,0x00,0x00
+# GFX1250: v_max_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1b,0xd7,0x04,0x05,0x00,0x00]
+
+0x02,0x00,0x19,0xd7,0x04,0x0c,0x02,0x00
+# GFX1250: v_max_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x0c,0x02,0x00]
+
+0x02,0x00,0x19,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00
+# GFX1250: v_max_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x19,0xd7,0x04,0x03,0x01,0x00
+# GFX1250: v_max_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x03,0x01,0x00]
+
+0x02,0x00,0x19,0xd7,0x04,0x05,0x00,0x00
+# GFX1250: v_max_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x19,0xd7,0x04,0x05,0x00,0x00]
+
+0x02,0x00,0x1a,0xd7,0x04,0x0c,0x02,0x00
+# GFX1250: v_min_i64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x0c,0x02,0x00]
+
+0x02,0x00,0x1a,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00
+# GFX1250: v_min_i64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x1a,0xd7,0x04,0x03,0x01,0x00
+# GFX1250: v_min_i64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x03,0x01,0x00]
+
+0x02,0x00,0x1a,0xd7,0x04,0x05,0x00,0x00
+# GFX1250: v_min_i64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x1a,0xd7,0x04,0x05,0x00,0x00]
+
+0x02,0x00,0x18,0xd7,0x04,0x0c,0x02,0x00
+# GFX1250: v_min_u64 v[2:3], s[4:5], v[6:7] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x0c,0x02,0x00]
+
+0x02,0x00,0x18,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00
+# GFX1250: v_min_u64 v[2:3], v[4:5], 0x3039 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0xff,0x01,0x00,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x18,0xd7,0x04,0x03,0x01,0x00
+# GFX1250: v_min_u64 v[2:3], v[4:5], 1 ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x03,0x01,0x00]
+
+0x02,0x00,0x18,0xd7,0x04,0x05,0x00,0x00
+# GFX1250: v_min_u64 v[2:3], v[4:5], s[2:3] ; encoding: [0x02,0x00,0x18,0xd7,0x04,0x05,0x00,0x00]
+
+0x02,0x00,0xfa,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_mad_nc_u64_u32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0xfa,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0xfa,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0xfa,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_mad_nc_u64_u32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0xfa,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_mad_nc_u64_u32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfa,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x80,0xfa,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_mad_nc_u64_u32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfa,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0xfb,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_mad_nc_i64_i32 v[2:3], s4, 4, v[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0xfb,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0xfb,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 0, 1 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0xfb,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_mad_nc_i64_i32 v[2:3], v4, 3, s[2:3] ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0xfb,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_mad_nc_i64_i32 v[2:3], v4, v7, 0x3039 ; encoding: [0x02,0x00,0xfb,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
# GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
@@ -62,6 +341,482 @@
0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00]
-## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-# GFX1250-FAKE16: {{.*}}
-# GFX1250-REAL16: {{.*}}
+0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0x3800 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00]
+
+0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08
+# GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10
+# GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
index dec73b7..5fa7bc8 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
@@ -2,6 +2,172 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x0d,0x30
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x0d,0x30]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x0d,0x30]
+
+0xff,0x45,0x33,0xd6,0xfa,0xfe,0xf7,0x0b,0xff,0x6f,0x0d,0x30
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:0x68 op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x45,0x33,0xd6,0xfa,0xfe,0xf7,0x0b,0xff,0x6f,0x0d,0x30]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x68 op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x45,0x33,0xd6,0xfa,0xfe,0xf7,0x0b,0xff,0x6f,0x0d,0x30]
+
+0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
+
+0x05,0x25,0x33,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x60,0x01,0x13
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x68 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x25,0x33,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x60,0x01,0x13]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x68 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x25,0x33,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x60,0x01,0x13]
+
+0x05,0x7c,0x33,0xd6,0xfa,0x04,0xfe,0xc9,0x01,0x2f,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:0x66 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0xfe,0xc9,0x01,0x2f,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x66 op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0xfe,0xc9,0x01,0x2f,0x01,0xff]
+
+0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
+
+0x05,0x78,0x33,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x33,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x33,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
+
+0x05,0x0c,0x33,0xd6,0xfa,0x04,0xfa,0xe9,0x01,0x50,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:0x67 op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x0c,0x33,0xd6,0xfa,0x04,0xfa,0xe9,0x01,0x50,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x67 op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x0c,0x33,0xd6,0xfa,0x04,0xfa,0xe9,0x01,0x50,0x01,0xff]
+
+0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+
+0x05,0x15,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null bitop3:0x68 op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x15,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x68 op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x15,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+
+0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
+
+0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
+
+0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
+
+0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
+
+0xff,0x04,0x34,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x0d,0x30
+# GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x04,0x34,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x0d,0x30]
+
+0x05,0x03,0x34,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x5f,0x01,0x01
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x34,0xd6,0xfa,0x04,0x06,0x0b,0x01,0x5f,0x01,0x01]
+
+0x05,0x04,0x34,0xd6,0xfa,0x04,0xc2,0x6b,0x01,0x60,0x09,0x13
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xc2,0x6b,0x01,0x60,0x09,0x13]
+
+0x05,0x00,0x34,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x34,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
+
+0x05,0x01,0x34,0xd6,0xfa,0x04,0xf2,0xa9,0x01,0x50,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x34,0xd6,0xfa,0x04,0xf2,0xa9,0x01,0x50,0x01,0xff]
+
+0x05,0x00,0x34,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x04,0x34,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+0x05,0x04,0x34,0xd6,0xfa,0x04,0xfe,0x8f,0x01,0x41,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0xfe,0x8f,0x01,0x41,0x01,0xff]
+
+0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+
+0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x34,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x34,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x34,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x02,0x34,0xd6,0xfa,0x04,0xae,0xa1,0x01,0x0f,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x15 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x34,0xd6,0xfa,0x04,0xae,0xa1,0x01,0x0f,0x01,0xff]
+
+0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
@@ -43,3 +209,216 @@
0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
index db211f90..faeff45 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
@@ -2,6 +2,138 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+0xff,0x01,0x33,0xd6,0xe9,0xfe,0xf7,0xab,0xff,0x00,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:0x4d dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x01,0x33,0xd6,0xe9,0xfe,0xf7,0xab,0xff,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x4d dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x01,0x33,0xd6,0xe9,0xfe,0xf7,0xab,0xff,0x00,0x00,0x00]
+
+0xff,0x44,0x33,0xd6,0xea,0xfe,0xf7,0xeb,0xff,0x00,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v255.h, v255.l, v255.l, src_scc bitop3:0x67 op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x44,0x33,0xd6,0xea,0xfe,0xf7,0xeb,0xff,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x67 op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x44,0x33,0xd6,0xea,0xfe,0xf7,0xeb,0xff,0x00,0x00,0x00]
+
+0x05,0x24,0x33,0xd6,0xe9,0x04,0x06,0xcb,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x66 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xe9,0x04,0x06,0xcb,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x66 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x24,0x33,0xd6,0xe9,0x04,0x06,0xcb,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x33,0xd6,0xea,0x04,0x06,0xc3,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x33,0xd6,0xea,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x33,0xd6,0xea,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+
+0x05,0x07,0x33,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x33,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x33,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+
+0x05,0x7b,0x33,0xd6,0xe9,0x04,0xfe,0x09,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi bitop3:0x58 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x7b,0x33,0xd6,0xe9,0x04,0xfe,0x09,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:0x58 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x7b,0x33,0xd6,0xe9,0x04,0xfe,0x09,0x01,0x77,0x39,0x05]
+
+0x05,0x78,0x33,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x33,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x33,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x33,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+
+0x05,0x0c,0x33,0xd6,0xe9,0x04,0xfa,0x69,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.h, v2.l, exec_lo bitop3:0x63 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xe9,0x04,0xfa,0x69,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x63 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0c,0x33,0xd6,0xe9,0x04,0xfa,0x69,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x33,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+
+0x05,0x10,0x33,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.h, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x10,0x33,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x10,0x33,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x33,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x33,0xd6,0xe9,0x04,0xee,0xe1,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:0xf dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x33,0xd6,0xe9,0x04,0xee,0xe1,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:0xf dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x33,0xd6,0xe9,0x04,0xee,0xe1,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x33,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x33,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x33,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x33,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x33,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x33,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
+
+0xff,0x03,0x34,0xd6,0xe9,0xfe,0xf7,0x0b,0xff,0x00,0x00,0x00
+# GFX1250: v_bitop3_b32_e64_dpp v255, v255, v255, src_scc bitop3:0x58 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x03,0x34,0xd6,0xe9,0xfe,0xf7,0x0b,0xff,0x00,0x00,0x00]
+
+0x05,0x00,0x34,0xd6,0xe9,0x04,0x06,0xc3,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, -1 bitop3:6 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x06,0xc3,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x34,0xd6,0xea,0x04,0xc2,0xab,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, 0.5 bitop3:0x4d dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x01,0x34,0xd6,0xea,0x04,0xc2,0xab,0x01,0x77,0x39,0x05]
+
+0x05,0x07,0x34,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_hi bitop3:0x3f dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x07,0x34,0xd6,0xe9,0x04,0xfe,0xe1,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x34,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, exec_lo bitop3:0x24 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfa,0x81,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x34,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, null bitop3:5 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xf2,0xa1,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x34,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, s105 bitop3:0x27 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xa6,0xe1,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x34,0xd6,0xe9,0x04,0xee,0xa1,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, ttmp15 bitop3:0x15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x34,0xd6,0xe9,0x04,0xee,0xa1,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x34,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v255 bitop3:0xa1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xfe,0x37,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x34,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x04,0x34,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_hi bitop3:0x64 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x34,0xd6,0xe9,0x04,0xae,0x89,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
@@ -13,3 +145,162 @@
0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
diff --git a/llvm/test/MC/ELF/many-instructions.s b/llvm/test/MC/ELF/many-instructions.s
deleted file mode 100644
index cbdb2a7..0000000
--- a/llvm/test/MC/ELF/many-instructions.s
+++ /dev/null
@@ -1,10 +0,0 @@
-# REQUIRES: asserts
-# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o /dev/null -debug-only=mc-dump
-
-## Test that encodeInstruction may cause a new fragment to be created.
-# CHECK: 0 Data Size:16200
-# CHECK: 16200 Data Size:180
-
-.rept 16384/10
-movabsq $foo, %rax
-.endr
diff --git a/llvm/test/MC/RISCV/rvv/fadd.s b/llvm/test/MC/RISCV/rvv/fadd.s
index 890b2c0..e472780 100644
--- a/llvm/test/MC/RISCV/rvv/fadd.s
+++ b/llvm/test/MC/RISCV/rvv/fadd.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfadd.vv v8, v4, v20, v0.t
# CHECK-INST: vfadd.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fcompare.s b/llvm/test/MC/RISCV/rvv/fcompare.s
index 3903bbd..5453645 100644
--- a/llvm/test/MC/RISCV/rvv/fcompare.s
+++ b/llvm/test/MC/RISCV/rvv/fcompare.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vmfeq.vv v8, v4, v20, v0.t
# CHECK-INST: vmfeq.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fdiv.s b/llvm/test/MC/RISCV/rvv/fdiv.s
index aa3aae5..78d11ed 100644
--- a/llvm/test/MC/RISCV/rvv/fdiv.s
+++ b/llvm/test/MC/RISCV/rvv/fdiv.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfdiv.vv v8, v4, v20, v0.t
# CHECK-INST: vfdiv.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fmacc.s b/llvm/test/MC/RISCV/rvv/fmacc.s
index 8ca43da..1b9c7a5 100644
--- a/llvm/test/MC/RISCV/rvv/fmacc.s
+++ b/llvm/test/MC/RISCV/rvv/fmacc.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfmacc.vv v8, v20, v4, v0.t
# CHECK-INST: vfmacc.vv v8, v20, v4, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fminmax.s b/llvm/test/MC/RISCV/rvv/fminmax.s
index f7e85ed..2b00fcb 100644
--- a/llvm/test/MC/RISCV/rvv/fminmax.s
+++ b/llvm/test/MC/RISCV/rvv/fminmax.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfmin.vv v8, v4, v20, v0.t
# CHECK-INST: vfmin.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fmul.s b/llvm/test/MC/RISCV/rvv/fmul.s
index 9cd6e52..f3877f7 100644
--- a/llvm/test/MC/RISCV/rvv/fmul.s
+++ b/llvm/test/MC/RISCV/rvv/fmul.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfmul.vv v8, v4, v20, v0.t
# CHECK-INST: vfmul.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fmv.s b/llvm/test/MC/RISCV/rvv/fmv.s
index 2534b51..749fdcc 100644
--- a/llvm/test/MC/RISCV/rvv/fmv.s
+++ b/llvm/test/MC/RISCV/rvv/fmv.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfmv.v.f v8, fa0
# CHECK-INST: vfmv.v.f v8, fa0
diff --git a/llvm/test/MC/RISCV/rvv/fothers.s b/llvm/test/MC/RISCV/rvv/fothers.s
index 0236d31..f9069e3 100644
--- a/llvm/test/MC/RISCV/rvv/fothers.s
+++ b/llvm/test/MC/RISCV/rvv/fothers.s
@@ -1,13 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f --M no-aliases \
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s --M no-aliases \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f | llvm-objdump -d --mattr=+v --mattr=+f -M no-aliases - \
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f -M no-aliases - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfsqrt.v v8, v4, v0.t
# CHECK-INST: vfsqrt.v v8, v4, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/freduction.s b/llvm/test/MC/RISCV/rvv/freduction.s
index 190d60f..30361b8 100644
--- a/llvm/test/MC/RISCV/rvv/freduction.s
+++ b/llvm/test/MC/RISCV/rvv/freduction.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f --M no-aliases \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s --M no-aliases \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f \
-# RUN: -M no-aliases - | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f -M no-aliases - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfredosum.vs v8, v4, v20, v0.t
# CHECK-INST: vfredosum.vs v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/fsub.s b/llvm/test/MC/RISCV/rvv/fsub.s
index 62ff2e7..97a54fb 100644
--- a/llvm/test/MC/RISCV/rvv/fsub.s
+++ b/llvm/test/MC/RISCV/rvv/fsub.s
@@ -1,15 +1,12 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zve32f %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d --mattr=+v --mattr=+f - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v %s \
-# RUN: --mattr=+f \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d --mattr=+zve32f - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zve32f %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vfsub.vv v8, v4, v20, v0.t
# CHECK-INST: vfsub.vv v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/zvfbfwma.s b/llvm/test/MC/RISCV/rvv/zvfbfwma.s
index 3ba5d38..9c0c1b1 100644
--- a/llvm/test/MC/RISCV/rvv/zvfbfwma.s
+++ b/llvm/test/MC/RISCV/rvv/zvfbfwma.s
@@ -40,29 +40,3 @@ vfwmaccbf16.vf v8, fa0, v4, v0.t
# CHECK-ERROR: instruction requires the following: 'Zvfbfwma' (Vector BF16 widening mul-add){{$}}
# CHECK-UNKNOWN: ee455457 <unknown>
vfwmaccbf16.vf v8, fa0, v4
-
-# Check scalar half FP load/store/move included in this extension.
-
-# CHECK-INST: flh ft0, 12(a0)
-# CHECK-ENCODING: [0x07,0x10,0xc5,0x00]
-# CHECK-ERROR: instruction requires the following: 'Zfh' (Half-Precision Floating-Point) or 'Zfhmin' (Half-Precision Floating-Point Minimal) or 'Zfbfmin' (Scalar BF16 Converts){{$}}
-# CHECK-UNKNOWN: 00c51007 <unknown>
-flh f0, 12(a0)
-
-# CHECK-INST: fsh ft6, 2047(s4)
-# CHECK-ENCODING: [0xa7,0x1f,0x6a,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zfh' (Half-Precision Floating-Point) or 'Zfhmin' (Half-Precision Floating-Point Minimal) or 'Zfbfmin' (Scalar BF16 Converts){{$}}
-# CHECK-UNKNOWN: 7e6a1fa7 <unknown>
-fsh f6, 2047(s4)
-
-# CHECK-INST: fmv.x.h a2, fs7
-# CHECK-ENCODING: [0x53,0x86,0x0b,0xe4]
-# CHECK-ERROR: instruction requires the following: 'Zfh' (Half-Precision Floating-Point) or 'Zfhmin' (Half-Precision Floating-Point Minimal) or 'Zfbfmin' (Scalar BF16 Converts){{$}}
-# CHECK-UNKNOWN: e40b8653 <unknown>
-fmv.x.h a2, fs7
-
-# CHECK-INST: fmv.h.x ft1, a6
-# CHECK-ENCODING: [0xd3,0x00,0x08,0xf4]
-# CHECK-ERROR: instruction requires the following: 'Zfh' (Half-Precision Floating-Point) or 'Zfhmin' (Half-Precision Floating-Point Minimal) or 'Zfbfmin' (Scalar BF16 Converts){{$}}
-# CHECK-UNKNOWN: f40800d3 <unknown>
-fmv.h.x ft1, a6
diff --git a/llvm/test/MC/RISCV/xqcisim-valid.s b/llvm/test/MC/RISCV/xqcisim-valid.s
index b5758f4..ed8e1df 100644
--- a/llvm/test/MC/RISCV/xqcisim-valid.s
+++ b/llvm/test/MC/RISCV/xqcisim-valid.s
@@ -3,15 +3,16 @@
# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcisim < %s \
# RUN: | llvm-objdump --mattr=+experimental-xqcisim -M no-aliases --no-print-imm-hex -d - \
-# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: | FileCheck -check-prefixes=CHECK-INST,CHECK-NOALIAS %s
# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-xqcisim -show-encoding \
-# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
+# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST,CHECK-ALIAS %s
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcisim < %s \
# RUN: | llvm-objdump --mattr=+experimental-xqcisim --no-print-imm-hex -d - \
-# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: | FileCheck -check-prefixes=CHECK-INST,CHECK-ALIAS %s
-# CHECK-INST: qc.psyscalli 1023
+# CHECK-ALIAS: qc.psyscalli 1023
+# CHECK-NOALIAS: slti zero, zero, 1023
# CHECK-ENC: encoding: [0x13,0x20,0xf0,0x3f]
qc.psyscalli 1023
@@ -19,34 +20,42 @@ qc.psyscalli 1023
# CHECK-ENC: encoding: [0x13,0x20,0xf0,0x4f]
qc.pputci 255
-# CHECK-INST: qc.c.ptrace
+# CHECK-ALIAS: qc.c.ptrace
+# CHECK-NOALIAS: c.slli zero, 0
# CHECK-ENC: encoding: [0x02,0x00]
qc.c.ptrace
-# CHECK-INST: qc.pcoredump
+# CHECK-ALIAS: qc.pcoredump
+# CHECK-NOALIAS: slti zero, zero, 1536
# CHECK-ENC: encoding: [0x13,0x20,0x00,0x60]
qc.pcoredump
-# CHECK-INST: qc.ppregs
+# CHECK-ALIAS: qc.ppregs
+# CHECK-NOALIAS: slti zero, zero, 1792
# CHECK-ENC: encoding: [0x13,0x20,0x00,0x70]
qc.ppregs
-# CHECK-INST: qc.ppreg a0
+# CHECK-ALIAS: qc.ppreg a0
+# CHECK-NOALIAS: slti zero, a0, -2048
# CHECK-ENC: encoding: [0x13,0x20,0x05,0x80]
qc.ppreg x10
-# CHECK-INST: qc.pputc t2
+# CHECK-ALIAS: qc.pputc t2
+# CHECK-NOALIAS: slti zero, t2, -1792
# CHECK-ENC: encoding: [0x13,0xa0,0x03,0x90]
qc.pputc x7
-# CHECK-INST: qc.pputs a5
+# CHECK-ALIAS: qc.pputs a5
+# CHECK-NOALIAS: slti zero, a5, -1536
# CHECK-ENC: encoding: [0x13,0xa0,0x07,0xa0]
qc.pputs x15
-# CHECK-INST: qc.pexit s10
+# CHECK-ALIAS: qc.pexit s10
+# CHECK-NOALIAS: slti zero, s10, -1280
# CHECK-ENC: encoding: [0x13,0x20,0x0d,0xb0]
qc.pexit x26
-# CHECK-INST: qc.psyscall a1
+# CHECK-ALIAS: qc.psyscall a1
+# CHECK-NOALIAS: slti zero, a1, -1024
# CHECK-ENC: encoding: [0x13,0xa0,0x05,0xc0]
qc.psyscall x11
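
The new CHECK-NOALIAS lines make explicit that every Xqcisim pseudo is encoded as an slti with rd = zero and a distinguishing immediate, so a disassembler without the extension still prints a valid base instruction. As a cross-check, this sketch decodes the listed encoding for qc.ppreg a0 ([0x13,0x20,0x05,0x80]) using nothing beyond the standard RISC-V I-type field layout.

def decode_itype(raw_bytes):
    # Decode a 32-bit little-endian instruction word as RISC-V I-type.
    word = int.from_bytes(bytes(raw_bytes), "little")
    opcode = word & 0x7F                    # 0x13 = OP-IMM
    rd = (word >> 7) & 0x1F
    funct3 = (word >> 12) & 0x7             # 2 = SLTI
    rs1 = (word >> 15) & 0x1F
    imm = word >> 20
    if imm & 0x800:                         # sign-extend the 12-bit immediate
        imm -= 0x1000
    return opcode, funct3, rd, rs1, imm

opcode, funct3, rd, rs1, imm = decode_itype([0x13, 0x20, 0x05, 0x80])
assert (opcode, funct3) == (0x13, 2)            # slti
assert rd == 0 and rs1 == 10 and imm == -2048   # slti zero, a0, -2048
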
diff --git a/llvm/test/MC/RISCV/xqcisync-valid.s b/llvm/test/MC/RISCV/xqcisync-valid.s
index 84ea74b..5b4c506 100644
--- a/llvm/test/MC/RISCV/xqcisync-valid.s
+++ b/llvm/test/MC/RISCV/xqcisync-valid.s
@@ -3,14 +3,15 @@
# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST,CHECK-NOALIAS %s
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcisync < %s \
# RUN: | llvm-objdump --mattr=+experimental-xqcisync -M no-aliases --no-print-imm-hex -d - \
-# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: | FileCheck -check-prefixes=CHECK-INST,CHECK-NOALIAS %s
# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-xqcisync -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST,CHECK-ALIAS %s
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcisync < %s \
# RUN: | llvm-objdump --mattr=+experimental-xqcisync --no-print-imm-hex -d - \
-# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: | FileCheck -check-prefixes=CHECK-INST,CHECK-ALIAS %s
-# CHECK-INST: qc.c.delay 10
+# CHECK-NOALIAS: c.slli zero, 10
+# CHECK-ALIAS: qc.c.delay 10
# CHECK-ENC: encoding: [0x2a,0x00]
qc.c.delay 10
diff --git a/llvm/test/MC/X86/verify-callgraph-section.s b/llvm/test/MC/X86/verify-callgraph-section.s
new file mode 100644
index 0000000..ce07228
--- /dev/null
+++ b/llvm/test/MC/X86/verify-callgraph-section.s
@@ -0,0 +1,58 @@
+/// Test the callgraph section to make sure the indirect callsites
+/// (annotated by generated temporary labels .Ltmp*) are associated
+/// with the corresponding callee type identifiers.
+
+// RUN: llvm-mc -triple=x86_64 -filetype=obj -o - < %s | llvm-readelf -x .callgraph - | FileCheck %s
+
+ .text
+ .globl ball # -- Begin function ball
+ .p2align 4
+ .type ball,@function
+ball: # @ball
+.Lfunc_begin0:
+# %bb.0: # %entry
+ pushq %rbx
+ subq $32, %rsp
+ movl $0, 4(%rsp)
+ movq foo@GOTPCREL(%rip), %rcx
+ movq %rcx, 24(%rsp)
+ xorl %eax, %eax
+ callq *%rcx
+.Ltmp0:
+ movq bar@GOTPCREL(%rip), %rax
+ movq %rax, 16(%rsp)
+ movsbl 3(%rsp), %edi
+ callq *%rax
+.Ltmp1:
+ movq baz@GOTPCREL(%rip), %rax
+ movq %rax, 8(%rsp)
+ leaq 3(%rsp), %rbx
+ movq %rbx, %rdi
+ callq *%rax
+.Ltmp2:
+ callq foo@PLT
+ movsbl 3(%rsp), %edi
+ callq bar@PLT
+ movq %rbx, %rdi
+ callq baz@PLT
+ addq $32, %rsp
+ popq %rbx
+ retq
+ .section .callgraph,"o",@progbits,.text
+ .quad 0
+ .quad .Lfunc_begin0
+ .quad 1
+ .quad 3
+ /// MD5 hash of the callee type ID for foo.
+ // CHECK: 2444f731 f5eecb3e
+ .quad 0x3ecbeef531f74424
+ .quad .Ltmp0
+ /// MD5 hash of the callee type ID for bar.
+ // CHECK: 5486bc59 814b8e30
+ .quad 0x308e4b8159bc8654
+ .quad .Ltmp1
+ /// MD5 hash of the callee type ID for baz.
+ // CHECK: 7ade6814 f897fd77
+ .quad 0x77fd97f81468de7a
+ .quad .Ltmp2
+ .text
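
In the new test, each .quad callee-type-ID hash is matched against the words printed by llvm-readelf -x .callgraph, and that dump is raw little-endian bytes grouped four at a time, which is why 0x3ecbeef531f74424 shows up as "2444f731 f5eecb3e". A minimal sketch reproducing the formatting:

import struct

def readelf_words(value):
    # llvm-readelf -x prints the section bytes in storage order; a .quad is
    # stored little-endian, so the hex words look byte-reversed.
    raw = struct.pack("<Q", value)
    return [raw[i:i + 4].hex() for i in (0, 4)]

assert readelf_words(0x3ecbeef531f74424) == ["2444f731", "f5eecb3e"]  # foo
assert readelf_words(0x308e4b8159bc8654) == ["5486bc59", "814b8e30"]  # bar
assert readelf_words(0x77fd97f81468de7a) == ["7ade6814", "f897fd77"]  # baz
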
diff --git a/llvm/test/TableGen/get-named-operand-idx.td b/llvm/test/TableGen/get-named-operand-idx.td
index f5c5d93..ab23edd 100644
--- a/llvm/test/TableGen/get-named-operand-idx.td
+++ b/llvm/test/TableGen/get-named-operand-idx.td
@@ -72,14 +72,10 @@ def InstD : InstBase {
// CHECK: {0, 1, 2, -1, -1, },
// CHECK: {-1, -1, -1, 0, 1, },
// CHECK: };
-// CHECK: switch(Opcode) {
-// CHECK: case MyNamespace::InstA:
-// CHECK: return OperandMap[0][static_cast<unsigned>(Name)];
-// CHECK: case MyNamespace::InstB:
-// CHECK: case MyNamespace::InstC:
-// CHECK: return OperandMap[1][static_cast<unsigned>(Name)];
-// CHECK: default: return -1;
-// CHECK: }
+// CHECK: static constexpr uint8_t InstructionIndex[] = {
+// CHECK: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+// CHECK: };
+// CHECK: return OperandMap[InstructionIndex[Opcode]][(unsigned)Name];
// CHECK: }
// CHECK: } // end namespace llvm::MyNamespace
// CHECK: #endif //GET_INSTRINFO_NAMED_OPS
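
The updated CHECK lines describe the new shape of the generated getNamedOperandIdx: instead of a switch over opcodes, an InstructionIndex array selects a row of OperandMap and the named operand selects the column. A small sketch of that two-level lookup follows; the OperandMap rows mirror the CHECK lines above, while the opcode numbers are invented for the example, since the real values depend on the full opcode enumeration.

# Sketch of the two-level lookup emitted after this change. Opcode numbers
# (InstA=16, InstB=17, InstC=18) are hypothetical.
OperandMap = [
    [0, 1, 2, -1, -1],    # layout used by InstA
    [-1, -1, -1, 0, 1],   # layout shared by InstB and InstC
]
InstructionIndex = [0] * 16 + [0, 1, 1]   # hypothetical opcode -> row

def get_named_operand_idx(opcode, name_index):
    return OperandMap[InstructionIndex[opcode]][name_index]

assert get_named_operand_idx(16, 2) == 2     # third named operand of InstA
assert get_named_operand_idx(17, 0) == -1    # InstB lacks that operand
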
diff --git a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
index da756e7..9eb9bda 100644
--- a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
+++ b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
@@ -30,6 +30,20 @@
; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+; It runs the same if we use -indexed-codegen-data-read-function-map-names=false.
+; RUN: llvm-lto2 run -enable-global-merge-func=true \
+; RUN: -indexed-codegen-data-read-function-map-names=false \
+; RUN: -codegen-data-use-path=%tout.cgdata \
+; RUN: %t-foo.bc %t-goo.bc -o %tout-read \
+; RUN: -r %t-foo.bc,_f1,px \
+; RUN: -r %t-goo.bc,_f2,px \
+; RUN: -r %t-foo.bc,_g,l -r %t-foo.bc,_g1,l -r %t-foo.bc,_g2,l \
+; RUN: -r %t-goo.bc,_g,l -r %t-goo.bc,_g1,l -r %t-goo.bc,_g2,l
+; RUN: llvm-nm %tout-read.1 | FileCheck %s --check-prefix=READ1
+; RUN: llvm-nm %tout-read.2 | FileCheck %s --check-prefix=READ2
+; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
+; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+

; READ1: _f1.Tgm
; READ2: _f2.Tgm
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
index 4d57199..bb3001e 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
@@ -190,6 +190,39 @@ return: ; preds = %entry, %if.end
ret i32 %retval.0
}
+define i32 @ctz3_with_i8gep(i32 %x) {
+; CHECK-LABEL: @ctz3_with_i8gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[TMP2]], [[IF_END]] ], [ 32, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 81224991
+ %0 = lshr i32 %mul, 25
+ %1 = and i32 %0, 124
+ %arrayidx.idx = zext nneg i32 %1 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz3.table, i64 %arrayidx.idx
+ %2 = load i32, ptr %arrayidx, align 4
+ br label %return
+
+return: ; preds = %if.end, %entry
+ %retval.0 = phi i32 [ %2, %if.end ], [ 32, %entry ]
+ ret i32 %retval.0
+}
+
+
@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4
define i32 @ctz4(i64 %b) {
@@ -276,3 +309,192 @@ entry:
%0 = load i32, ptr %arrayidx, align 4
ret i32 %0
}
+
+;; This has a wrong table size but is otherwise fine.
+@ctz9.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+define i32 @ctz9(i32 %x) {
+; CHECK-LABEL: @ctz9(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [128 x i8], ptr @ctz9.table, i64 0, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+define i32 @ctz1_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz1_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz7.table, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2 (i16 table) with an i8 gep making the indices invalid
+define i32 @ctz2_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], 72416175
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[MUL]], 26
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[SHR]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 26
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 %idxprom
+ %0 = load i16, ptr %arrayidx, align 1
+ %conv = sext i16 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2_with_i8_gep but with the gep index multiplied by 2.
+define i32 @ctz2_with_i8_gep_fixed(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep_fixed(
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false)
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 25
+ %shr2 = and i32 %shr, 126
+ %1 = zext nneg i32 %shr2 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz2.table, i64 %1
+ %2 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %2 to i32
+ ret i32 %conv
+}
+
+; This is an i16 input with the de Bruijn table stored in a single i128.
+@tablei128 = internal unnamed_addr constant i128 16018378897745984667142067713738932480, align 16
+define i32 @cttz_i16_via_i128(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i16 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as above but the table is a little off
+@tablei128b = internal unnamed_addr constant i128 16018378897745984667142068813250560256, align 16
+define i32 @cttz_i16_via_i128_incorrecttable(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128_incorrecttable(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i16 [[X]], [[SUB]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i16 [[AND]], 2479
+; CHECK-NEXT: [[TMP0:%.*]] = lshr i16 [[MUL]], 12
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i16 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr @tablei128b, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as ctz1 but the table and load are very large
+@ctz7i128.table = internal unnamed_addr constant [32 x i128] [i128 0, i128 1, i128 28, i128 2, i128 29, i128 14, i128 24, i128 3, i128 30, i128 22, i128 20, i128 15, i128 25, i128 17, i128 4, i128 8, i128 31, i128 27, i128 13, i128 23, i128 21, i128 19, i128 16, i128 7, i128 26, i128 12, i128 18, i128 6, i128 11, i128 5, i128 10, i128 9], align 16
+define i128 @ctz1_i128(i32 %x) {
+; CHECK-LABEL: @ctz1_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i128
+; CHECK-NEXT: ret i128 [[TMP3]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [32 x i128], ptr @ctz7i128.table, i64 0, i64 %idxprom
+ %l = load i128, ptr %arrayidx, align 1
+ ret i128 %l
+}
+
+; This is roughly the same as ctz1 but using i128.
+@table.i128 = internal unnamed_addr constant [128 x i8] c"\00\01e\02tf<\03|ug^R=!\04}yvWoh_5ZSE>0\22\14\05~rzPwmX.pkiI`K6\1Ab[TBMF?'81*#\1C\15\0E\06\7Fds;{]Q xVn4YD/\13qOl-jHJ\19aAL&7)\1B\0Dc:\\\1FU3C\12N,G\18@%(\0C9\1E2\11+\17$\0B\1D\10\16\0A\0F\09\08\07", align 1
+define i32 @src(i128 noundef %x) {
+; CHECK-LABEL: @src(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP3:%.*]] = call i128 @llvm.cttz.i128(i128 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i128 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i128 0, i128 [[TMP3]]
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i128 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i128 0, %x
+ %and = and i128 %x, %sub
+ %mul = mul i128 %and, 2647824804797170443043024478319300753
+ %shr = lshr i128 %mul, 121
+ %idxprom = trunc i128 %shr to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @table.i128, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
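
All of the added functions feed AggressiveInstCombine the table-based cttz idiom it is expected to recognize (or, in the negative variants, reject): isolate the lowest set bit, multiply by a de Bruijn constant, shift the product down, and index a precomputed table. A minimal sketch of the 32-bit form, using the multiplier 125613361 (0x077CB531), shift 27, and the table contents that appear in @ctz7i128.table above:

DEBRUIJN = 125613361  # 0x077CB531, the multiplier used by ctz1/ctz9 above
TABLE = [0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
         31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9]

def table_cttz(x):
    # x must be a non-zero 32-bit value; the IR handles x == 0 with a
    # separate branch or by passing i1 true to llvm.cttz.
    lowbit = x & -x                                  # isolate lowest set bit
    return TABLE[((lowbit * DEBRUIJN) & 0xFFFFFFFF) >> 27]

for i in range(32):
    assert table_cttz(1 << i) == i                   # agrees with plain cttz

The *_incorrecttable and i8-gep variants above exist precisely because the fold to llvm.cttz is only valid when the table really inverts this mapping; otherwise the load must stay.
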
diff --git a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
index 714acd7..90836db8 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
@@ -66,7 +66,7 @@ entry:
;; This is a negative test with a wrong table size and constants.
-@ctz3.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+@ctz3.table = internal unnamed_addr constant [128 x i8] c"\01\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
define i32 @ctz5(i32 %x) {
entry:
diff --git a/llvm/test/Transforms/GVN/PRE/load-metadata.ll b/llvm/test/Transforms/GVN/PRE/load-metadata.ll
index 415812b..1128b1b 100644
--- a/llvm/test/Transforms/GVN/PRE/load-metadata.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-metadata.ll
@@ -1,14 +1,40 @@
-; RUN: opt -S -passes=gvn < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=gvn < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -S -passes='gvn<memoryssa>' < %s | FileCheck %s --check-prefixes=CHECK,MSSA
define i32 @test1(ptr %p, i1 %C) {
-; CHECK-LABEL: @test1(
+; MDEP-LABEL: define i32 @test1(
+; MDEP-SAME: ptr [[P:%.*]], i1 [[C:%.*]]) {
+; MDEP-NEXT: [[BLOCK1:.*:]]
+; MDEP-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MDEP: [[BLOCK2]]:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P]], align 4, !range [[RNG0:![0-9]+]], !invariant.group [[META1:![0-9]+]]
+; MDEP-NEXT: br label %[[BLOCK4:.*]]
+; MDEP: [[BLOCK3]]:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label %[[BLOCK4]]
+; MDEP: [[BLOCK4]]:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, %[[BLOCK3]] ], [ [[PRE_PRE]], %[[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: define i32 @test1(
+; MSSA-SAME: ptr [[P:%.*]], i1 [[C:%.*]]) {
+; MSSA-NEXT: [[BLOCK1:.*:]]
+; MSSA-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MSSA: [[BLOCK2]]:
+; MSSA-NEXT: br label %[[BLOCK4:.*]]
+; MSSA: [[BLOCK3]]:
+; MSSA-NEXT: store i32 0, ptr [[P]], align 4
+; MSSA-NEXT: br label %[[BLOCK4]]
+; MSSA: [[BLOCK4]]:
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4, !range [[RNG0:![0-9]+]], !invariant.group [[META1:![0-9]+]]
+; MSSA-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
- br label %block4
-; CHECK: block2:
-; CHECK-NEXT: load i32, ptr %p, align 4, !range !0, !invariant.group !1
+ br label %block4
block3:
store i32 0, ptr %p
@@ -22,3 +48,12 @@ block4:
!0 = !{i32 40, i32 100}
!1 = !{!"magic ptr"}
+;.
+; MDEP: [[RNG0]] = !{i32 40, i32 100}
+; MDEP: [[META1]] = !{!"magic ptr"}
+;.
+; MSSA: [[RNG0]] = !{i32 40, i32 100}
+; MSSA: [[META1]] = !{!"magic ptr"}
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/load-pre-across-backedge.ll b/llvm/test/Transforms/GVN/PRE/load-pre-across-backedge.ll
index a3eae62..b677272 100644
--- a/llvm/test/Transforms/GVN/PRE/load-pre-across-backedge.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-across-backedge.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=gvn -S < %s | FileCheck %s
+; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
; Check that PRE-LOAD across backedge does not
; result in invalid dominator tree.
@@ -43,3 +44,6 @@ bb3:
call void @use(i32 %v)
br label %bb2
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/load-pre-nonlocal.ll b/llvm/test/Transforms/GVN/PRE/load-pre-nonlocal.ll
index b778d98..9dba73a 100644
--- a/llvm/test/Transforms/GVN/PRE/load-pre-nonlocal.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-nonlocal.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -o - -passes=gvn %s | FileCheck %s
+; RUN: opt -S -o - -passes=gvn %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -S -o - -passes='gvn<memoryssa>' %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -143,3 +144,6 @@ if.end:
file: !12,
isOptimized: true, flags: "-O2",
splitDebugFilename: "abc.debug", emissionKind: 2)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/lpre-call-wrap.ll b/llvm/test/Transforms/GVN/PRE/lpre-call-wrap.ll
index 06a7f11..9b4eb60 100644
--- a/llvm/test/Transforms/GVN/PRE/lpre-call-wrap.ll
+++ b/llvm/test/Transforms/GVN/PRE/lpre-call-wrap.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s
+; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -S -passes='gvn<memoryssa>' -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MSSA
;
; Make sure the load in bb3.backedge is removed and moved into bb1 after the
; call. This makes the non-call case faster.
@@ -18,31 +19,56 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
%struct.A = type { i32, i32 }
define void @_Z12testfunctionR1A(ptr %iter) {
-; CHECK-LABEL: @_Z12testfunctionR1A(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ITER:%.*]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[TMP1]], label [[RETURN:%.*]], label [[BB_NPH:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [[STRUCT_A:%.*]], ptr [[ITER]], i32 0, i32 1
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[DOTRLE:%.*]] = phi i32 [ [[TMP0]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB3_BACKEDGE:%.*]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[DOTRLE]], 1
-; CHECK-NEXT: store i32 [[TMP3]], ptr [[ITER]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP3]], [[TMP4]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[BB1:%.*]], label [[BB3_BACKEDGE]]
-; CHECK: bb1:
-; CHECK-NEXT: tail call void @_Z1gv()
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ITER]], align 4
-; CHECK-NEXT: br label [[BB3_BACKEDGE]]
-; CHECK: bb3.backedge:
-; CHECK-NEXT: [[TMP6]] = phi i32 [ [[DOTPRE]], [[BB1]] ], [ [[TMP3]], [[BB]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
-; CHECK-NEXT: br i1 [[TMP7]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @_Z12testfunctionR1A(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = load i32, ptr [[ITER:%.*]], align 4
+; MDEP-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; MDEP-NEXT: br i1 [[TMP1]], label [[RETURN:%.*]], label [[BB_NPH:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP2:%.*]] = getelementptr [[STRUCT_A:%.*]], ptr [[ITER]], i32 0, i32 1
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[DOTRLE:%.*]] = phi i32 [ [[TMP0]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB3_BACKEDGE:%.*]] ]
+; MDEP-NEXT: [[TMP3:%.*]] = add i32 [[DOTRLE]], 1
+; MDEP-NEXT: store i32 [[TMP3]], ptr [[ITER]], align 4
+; MDEP-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4
+; MDEP-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP3]], [[TMP4]]
+; MDEP-NEXT: br i1 [[TMP5]], label [[BB1:%.*]], label [[BB3_BACKEDGE]]
+; MDEP: bb1:
+; MDEP-NEXT: tail call void @_Z1gv()
+; MDEP-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ITER]], align 4
+; MDEP-NEXT: br label [[BB3_BACKEDGE]]
+; MDEP: bb3.backedge:
+; MDEP-NEXT: [[TMP6]] = phi i32 [ [[DOTPRE]], [[BB1]] ], [ [[TMP3]], [[BB]] ]
+; MDEP-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
+; MDEP-NEXT: br i1 [[TMP7]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @_Z12testfunctionR1A(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = load i32, ptr [[ITER:%.*]], align 4
+; MSSA-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; MSSA-NEXT: br i1 [[TMP1]], label [[RETURN:%.*]], label [[BB_NPH:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP2:%.*]] = getelementptr [[STRUCT_A:%.*]], ptr [[ITER]], i32 0, i32 1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[DOTRLE:%.*]] = phi i32 [ [[TMP0]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB3_BACKEDGE:%.*]] ]
+; MSSA-NEXT: [[TMP3:%.*]] = add i32 [[DOTRLE]], 1
+; MSSA-NEXT: store i32 [[TMP3]], ptr [[ITER]], align 4
+; MSSA-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4
+; MSSA-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP3]], [[TMP4]]
+; MSSA-NEXT: br i1 [[TMP5]], label [[BB1:%.*]], label [[BB3_BACKEDGE]]
+; MSSA: bb1:
+; MSSA-NEXT: tail call void @_Z1gv()
+; MSSA-NEXT: br label [[BB3_BACKEDGE]]
+; MSSA: bb3.backedge:
+; MSSA-NEXT: [[TMP6]] = load i32, ptr [[ITER]], align 4
+; MSSA-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
+; MSSA-NEXT: br i1 [[TMP7]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = getelementptr %struct.A, ptr %iter, i32 0, i32 0 ; <ptr> [#uses=3]
@@ -76,3 +102,5 @@ return: ; preds = %bb3.backedge, %entry
}
declare void @_Z1gv()
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/rle-addrspace-cast.ll b/llvm/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
index 7f67b2b..6c79b02 100644
--- a/llvm/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
+++ b/llvm/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
@@ -1,6 +1,23 @@
-; RUN: opt < %s -data-layout="e-p:32:32:32-p1:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -passes=gvn,dce -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -passes=gvn,dce -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -passes='gvn<memoryssa>',dce -S | FileCheck %s --check-prefixes=CHECK,MSSA
define i8 @coerce_offset0_addrspacecast(i32 %V, ptr %P) {
+; MDEP-LABEL: define i8 @coerce_offset0_addrspacecast(
+; MDEP-SAME: i32 [[V:%.*]], ptr [[P:%.*]]) {
+; MDEP-NEXT: store i32 [[V]], ptr [[P]], align 4
+; MDEP-NEXT: [[TMP1:%.*]] = lshr i32 [[V]], 16
+; MDEP-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
+; MDEP-NEXT: ret i8 [[TMP2]]
+;
+; MSSA-LABEL: define i8 @coerce_offset0_addrspacecast(
+; MSSA-SAME: i32 [[V:%.*]], ptr [[P:%.*]]) {
+; MSSA-NEXT: store i32 [[V]], ptr [[P]], align 4
+; MSSA-NEXT: [[P2:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i8, ptr addrspace(1) [[P2]], i32 2
+; MSSA-NEXT: [[A:%.*]] = load i8, ptr addrspace(1) [[P3]], align 1
+; MSSA-NEXT: ret i8 [[A]]
+;
store i32 %V, ptr %P
%P2 = addrspacecast ptr %P to ptr addrspace(1)
@@ -8,7 +25,6 @@ define i8 @coerce_offset0_addrspacecast(i32 %V, ptr %P) {
%A = load i8, ptr addrspace(1) %P3
ret i8 %A
-; CHECK-LABEL: @coerce_offset0_addrspacecast(
-; CHECK-NOT: load
-; CHECK: ret i8
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/rle-semidominated.ll b/llvm/test/Transforms/GVN/PRE/rle-semidominated.ll
index e927f37..4eb090e 100644
--- a/llvm/test/Transforms/GVN/PRE/rle-semidominated.ll
+++ b/llvm/test/Transforms/GVN/PRE/rle-semidominated.ll
@@ -1,13 +1,45 @@
-; RUN: opt < %s -passes=gvn -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA
define i32 @main(ptr %p, i32 %x, i32 %y) {
+; MDEP-LABEL: define i32 @main(
+; MDEP-SAME: ptr [[P:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MDEP-NEXT: [[BLOCK1:.*:]]
+; MDEP-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; MDEP-NEXT: br i1 [[CMP]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MDEP: [[BLOCK2]]:
+; MDEP-NEXT: [[DEAD_PRE:%.*]] = load i32, ptr [[P]], align 4
+; MDEP-NEXT: br label %[[BLOCK4:.*]]
+; MDEP: [[BLOCK3]]:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label %[[BLOCK4]]
+; MDEP: [[BLOCK4]]:
+; MDEP-NEXT: [[DEAD:%.*]] = phi i32 [ 0, %[[BLOCK3]] ], [ [[DEAD_PRE]], %[[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[DEAD]]
+;
+; MSSA-LABEL: define i32 @main(
+; MSSA-SAME: ptr [[P:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MSSA-NEXT: [[BLOCK1:.*:]]
+; MSSA-NEXT: [[Z:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; MSSA-NEXT: br i1 [[CMP]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MSSA: [[BLOCK2]]:
+; MSSA-NEXT: br label %[[BLOCK4:.*]]
+; MSSA: [[BLOCK3]]:
+; MSSA-NEXT: store i32 0, ptr [[P]], align 4
+; MSSA-NEXT: br label %[[BLOCK4]]
+; MSSA: [[BLOCK4]]:
+; MSSA-NEXT: [[DEAD:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: ret i32 [[DEAD]]
+;
block1:
%z = load i32, ptr %p
%cmp = icmp eq i32 %x, %y
- br i1 %cmp, label %block2, label %block3
+ br i1 %cmp, label %block2, label %block3
block2:
- br label %block4
+ br label %block4
block3:
%b = bitcast i32 0 to i32
@@ -19,18 +51,5 @@ block4:
ret i32 %DEAD
}
-; CHECK: define i32 @main(ptr %p, i32 %x, i32 %y) {
-; CHECK-NEXT: block1:
-; CHECK-NOT: %z = load i32, ptr %p
-; CHECK-NEXT: %cmp = icmp eq i32 %x, %y
-; CHECK-NEXT: br i1 %cmp, label %block2, label %block3
-; CHECK: block2:
-; CHECK-NEXT: %DEAD.pre = load i32, ptr %p
-; CHECK-NEXT: br label %block4
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr %p
-; CHECK-NEXT: br label %block4
-; CHECK: block4:
-; CHECK-NEXT: %DEAD = phi i32 [ 0, %block3 ], [ %DEAD.pre, %block2 ]
-; CHECK-NEXT: ret i32 %DEAD
-; CHECK-NEXT: }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
new file mode 100644
index 0000000..640c910
--- /dev/null
+++ b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p indvars -S %s | FileCheck %s
+
+target triple = "arm64-apple-macosx15.0.0"
+
+declare i1 @cond()
+
+define void @pred_mip_12(ptr %dst, ptr %src, i32 %n, i64 %offset) {
+; CHECK-LABEL: define void @pred_mip_12(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N]], i32 1)
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[SMAX]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[OFFSET]], [[TMP0]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT: br label %[[OUTER_LOOP:.*]]
+; CHECK: [[OUTER_LOOP_LOOPEXIT:.*]]:
+; CHECK-NEXT: br label %[[OUTER_LOOP]]
+; CHECK: [[OUTER_LOOP]]:
+; CHECK-NEXT: [[OUTER_PTR:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[SCEVGEP]], %[[OUTER_LOOP_LOOPEXIT]] ]
+; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
+; CHECK-NEXT: br i1 [[C]], label %[[INNER_LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
+; CHECK: [[INNER_LOOP_PREHEADER]]:
+; CHECK-NEXT: br label %[[INNER_LOOP:.*]]
+; CHECK: [[INNER_LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[INNER_LOOP]] ], [ 0, %[[INNER_LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[OUTER_PTR]], align 1
+; CHECK-NEXT: store i8 [[L]], ptr [[DST]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[INNER_LOOP]], label %[[OUTER_LOOP_LOOPEXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %outer.loop
+
+outer.loop:
+ %outer.ptr = phi ptr [ %src, %entry ], [ %ptr.iv.next, %inner.loop ]
+ %c = call i1 @cond()
+ br i1 %c, label %inner.loop, label %exit
+
+inner.loop:
+ %iv = phi i32 [ 0, %outer.loop ], [ %iv.next, %inner.loop ]
+ %ptr.iv = phi ptr [ %src, %outer.loop ], [ %ptr.iv.next, %inner.loop ]
+ %l = load i8, ptr %outer.ptr, align 1
+ %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 %offset
+ store i8 %l, ptr %dst, align 2
+ %iv.next = add i32 %iv, 1
+ %ec = icmp slt i32 %iv.next, %n
+ br i1 %ec, label %inner.loop, label %outer.loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
index d24f9a4..17921af 100644
--- a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
@@ -15,11 +15,9 @@ define void @_Z3fn1v() {
; CHECK-NEXT: [[J_SROA_0_0_COPYLOAD:%.*]] = load i8, ptr [[X5]], align 1
; CHECK-NEXT: br label [[DOTPREHEADER4_LR_PH:%.*]]
; CHECK: .preheader4.lr.ph:
-; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[X4]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT: [[TMP4:%.*]] = sext i8 [[J_SROA_0_0_COPYLOAD]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[X4]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP2]]
; CHECK-NEXT: br label [[DOTPREHEADER4:%.*]]
; CHECK: .preheader4:
; CHECK-NEXT: [[K_09:%.*]] = phi ptr [ undef, [[DOTPREHEADER4_LR_PH]] ], [ [[X25:%.*]], [[X22:%.*]] ]
diff --git a/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll b/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
index 608a158..ba36005 100644
--- a/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
+++ b/llvm/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
@@ -1,17 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define ptr @foo(ptr %x, i32 %n) {
-; CHECK-LABEL: @foo(
-; CHECK-NOT: getelementptr
+; CHECK-LABEL: define ptr @foo(
+; CHECK-SAME: ptr [[X:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: ret ptr [[X]]
+;
%p = getelementptr {}, ptr %x, i32 %n
ret ptr %p
}
define ptr @bar(i64 %n, ptr %p) {
-; CHECK-LABEL: @bar(
+; CHECK-LABEL: define ptr @bar(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT: [[G:%.*]] = getelementptr { {}, [0 x { [0 x i8] }] }, ptr [[P]], i64 0, i32 1, i64 0, i32 0, i64 [[N]]
+; CHECK-NEXT: ret ptr [[G]]
+;
%g = getelementptr {{}, [0 x {[0 x i8]}]}, ptr %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
-; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
ret ptr %g
}
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
index 1520d6c..07c8a8c 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
@@ -35,7 +35,9 @@ define ptr @xzy(i64 %x, i64 %y, i64 %z) {
; CHECK-LABEL: define ptr @xzy(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr getelementptr inbounds nuw (i8, ptr @glob, i64 40), i64 0, i64 [[X]], i64 [[Z]], i64 [[Y]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [10 x [10 x [10 x i32]]], ptr getelementptr inbounds nuw (i8, ptr @glob, i64 40), i64 0, i64 [[X]]
+; CHECK-NEXT: [[GEP_SPLIT1:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[GEP_SPLIT]], i64 0, i64 [[Z]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [10 x i32], ptr [[GEP_SPLIT1]], i64 0, i64 [[Y]]
; CHECK-NEXT: ret ptr [[GEP]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
index 934852d..02042b1 100644
--- a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
+++ b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
@@ -131,10 +131,10 @@ define i1 @test5(double %x, i1 %cond) {
; CHECK: if.then:
; CHECK-NEXT: ret i1 false
; CHECK: if.end:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[X]], i32 408)
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[Y:%.*]] = phi double [ -1.000000e+00, [[ENTRY:%.*]] ], [ [[X]], [[IF_END]] ]
-; CHECK-NEXT: [[RET:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[Y]], i32 408)
+; CHECK-NEXT: [[RET:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TMP0]], [[IF_END]] ]
; CHECK-NEXT: ret i1 [[RET]]
;
entry:
@@ -391,11 +391,9 @@ define float @test_signbit_check_fail(float %x, i1 %cond) {
; CHECK: if.else:
; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
; CHECK: if.then2:
-; CHECK-NEXT: [[FNEG2:%.*]] = fneg float [[X]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[VALUE:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[FNEG2]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
-; CHECK-NEXT: [[RET:%.*]] = call float @llvm.fabs.f32(float [[VALUE]])
+; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[X]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
; CHECK-NEXT: ret float [[RET]]
;
%i32 = bitcast float %x to i32
diff --git a/llvm/test/Transforms/InstCombine/gep-vector.ll b/llvm/test/Transforms/InstCombine/gep-vector.ll
index 5546cb3..313c7ef 100644
--- a/llvm/test/Transforms/InstCombine/gep-vector.ll
+++ b/llvm/test/Transforms/InstCombine/gep-vector.ll
@@ -31,7 +31,8 @@ define <2 x ptr> @vectorindex3() {
define ptr @bitcast_vec_to_array_gep(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_gep(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [7 x i32], ptr [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [7 x i32], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [7 x i32], ptr [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep = getelementptr [7 x i32], ptr %x, i64 %y, i64 %z
@@ -42,7 +43,8 @@ define ptr @bitcast_vec_to_array_gep(ptr %x, i64 %y, i64 %z) {
define ptr @bitcast_array_to_vec_gep(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_array_to_vec_gep(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <3 x i32>, ptr [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds <3 x i32>, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <3 x i32>, ptr [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep = getelementptr inbounds <3 x i32>, ptr %x, i64 %y, i64 %z
@@ -53,7 +55,8 @@ define ptr @bitcast_array_to_vec_gep(ptr %x, i64 %y, i64 %z) {
define ptr @bitcast_vec_to_array_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_gep_matching_alloc_size(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [4 x i32], ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep = getelementptr [4 x i32], ptr %x, i64 %y, i64 %z
@@ -64,7 +67,8 @@ define ptr @bitcast_vec_to_array_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z)
define ptr @bitcast_array_to_vec_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_array_to_vec_gep_matching_alloc_size(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <4 x i32>, ptr [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds <4 x i32>, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <4 x i32>, ptr [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep = getelementptr inbounds <4 x i32>, ptr %x, i64 %y, i64 %z
@@ -76,7 +80,8 @@ define ptr @bitcast_array_to_vec_gep_matching_alloc_size(ptr %x, i64 %y, i64 %z)
define ptr addrspace(3) @bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_addrspace(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [7 x i32], ptr addrspace(3) [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -89,7 +94,8 @@ define ptr addrspace(3) @bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z)
define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @inbounds_bitcast_vec_to_array_addrspace(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [7 x i32], ptr addrspace(3) [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -102,7 +108,8 @@ define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace(ptr %x, i64 %y,
define ptr addrspace(3) @bitcast_vec_to_array_addrspace_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @bitcast_vec_to_array_addrspace_matching_alloc_size(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr addrspace(3) [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
@@ -115,7 +122,8 @@ define ptr addrspace(3) @bitcast_vec_to_array_addrspace_matching_alloc_size(ptr
define ptr addrspace(3) @inbounds_bitcast_vec_to_array_addrspace_matching_alloc_size(ptr %x, i64 %y, i64 %z) {
; CHECK-LABEL: @inbounds_bitcast_vec_to_array_addrspace_matching_alloc_size(
; CHECK-NEXT: [[ASC:%.*]] = addrspacecast ptr [[X:%.*]] to ptr addrspace(3)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]], i64 [[Z:%.*]]
+; CHECK-NEXT: [[GEP_SPLIT:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(3) [[ASC]], i64 [[Y:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(3) [[GEP_SPLIT]], i64 0, i64 [[Z:%.*]]
; CHECK-NEXT: ret ptr addrspace(3) [[GEP]]
;
%asc = addrspacecast ptr %x to ptr addrspace(3)
diff --git a/llvm/test/Transforms/InstCombine/gepphigep.ll b/llvm/test/Transforms/InstCombine/gepphigep.ll
index 6b5dae9..cd1e38b 100644
--- a/llvm/test/Transforms/InstCombine/gepphigep.ll
+++ b/llvm/test/Transforms/InstCombine/gepphigep.ll
@@ -6,124 +6,124 @@
%struct3 = type { i32, %struct4, %struct4 }
%struct4 = type { %struct2, %struct2 }
-define i32 @test1(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+define i32 @test1(ptr %dm, i1 %c, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
-; CHECK-NEXT: br i1 [[TMP4:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK-NEXT: [[INST1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
+; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[TMP1]], i64 [[TMP9:%.*]]
-; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[INST1]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[INST10]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP19:%.*]]
-; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 4
+; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[INST20]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP9]], [[BB1]] ], [ [[TMP19]], [[BB2]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
-; CHECK-NEXT: ret i32 [[TMP25]]
+; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[IDX1]], [[BB1]] ], [ [[IDX2]], [[BB2]] ]
+; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST24]], align 4
+; CHECK-NEXT: ret i32 [[INST25]]
;
bb:
- %tmp1 = load ptr, ptr %dm, align 8
- br i1 %tmp4, label %bb1, label %bb2
+ %inst1 = load ptr, ptr %dm, align 8
+ br i1 %c, label %bb1, label %bb2
bb1:
- %tmp10 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp9
- store i32 0, ptr %tmp10, align 4
+ %inst10 = getelementptr inbounds %struct2, ptr %inst1, i64 %idx1
+ store i32 0, ptr %inst10, align 4
br label %bb3
bb2:
- %tmp20 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp19
- store i32 0, ptr %tmp20, align 4
+ %inst20 = getelementptr inbounds %struct2, ptr %inst1, i64 %idx2
+ store i32 0, ptr %inst20, align 4
br label %bb3
bb3:
- %phi = phi ptr [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
- %tmp24 = getelementptr inbounds %struct2, ptr %phi, i64 0, i32 1
- %tmp25 = load i32, ptr %tmp24, align 4
- ret i32 %tmp25
+ %phi = phi ptr [ %inst10, %bb1 ], [ %inst20, %bb2 ]
+ %inst24 = getelementptr inbounds %struct2, ptr %phi, i64 0, i32 1
+ %inst25 = load i32, ptr %inst24, align 4
+ ret i32 %inst25
}
-define i32 @test2(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+define i32 @test2(ptr %dm, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[TMP1]], i64 [[TMP9:%.*]]
-; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP19:%.*]]
-; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP10]], i64 4
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
-; CHECK-NEXT: ret i32 [[TMP25]]
+; CHECK-NEXT: [[INST1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
+; CHECK-NEXT: [[INST10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[INST1]], i64 [[IDX1:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[INST10]], align 4
+; CHECK-NEXT: [[INST20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[INST1]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[INST20]], align 4
+; CHECK-NEXT: [[INST24:%.*]] = getelementptr inbounds nuw i8, ptr [[INST10]], i64 4
+; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST24]], align 4
+; CHECK-NEXT: ret i32 [[INST25]]
;
bb:
- %tmp1 = load ptr, ptr %dm, align 8
- %tmp10 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp9
- store i32 0, ptr %tmp10, align 4
- %tmp20 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp19
- store i32 0, ptr %tmp20, align 4
- %tmp24 = getelementptr inbounds %struct2, ptr %tmp10, i64 0, i32 1
- %tmp25 = load i32, ptr %tmp24, align 4
- ret i32 %tmp25
+ %inst1 = load ptr, ptr %dm, align 8
+ %inst10 = getelementptr inbounds %struct2, ptr %inst1, i64 %idx1
+ store i32 0, ptr %inst10, align 4
+ %inst20 = getelementptr inbounds %struct2, ptr %inst1, i64 %idx2
+ store i32 0, ptr %inst20, align 4
+ %inst24 = getelementptr inbounds %struct2, ptr %inst10, i64 0, i32 1
+ %inst25 = load i32, ptr %inst24, align 4
+ ret i32 %inst25
}
; Check that instcombine doesn't insert GEPs before landingpad.
-define i32 @test3(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personality ptr @__gxx_personality_v0 {
+define i32 @test3(ptr %dm, i1 %c, i64 %idx1, i64 %idx2, i64 %idx3) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test3(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br i1 [[TMP4:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[TMP19:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[INST1:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[IDX1:%.*]], i32 1
+; CHECK-NEXT: store i32 0, ptr [[INST1]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP20:%.*]], i32 1, i32 0, i32 1
-; CHECK-NEXT: store i32 0, ptr [[TMP12]], align 4
+; CHECK-NEXT: [[INST12:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[IDX2:%.*]], i32 1, i32 0, i32 1
+; CHECK-NEXT: store i32 0, ptr [[INST12]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP19]], [[BB1]] ], [ [[TMP20]], [[BB2]] ]
-; CHECK-NEXT: [[TMP22:%.*]] = invoke i32 @foo1(i32 11)
+; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[IDX1]], [[BB1]] ], [ [[IDX2]], [[BB2]] ]
+; CHECK-NEXT: [[INST22:%.*]] = invoke i32 @foo1(i32 11)
; CHECK-NEXT: to label [[BB4:%.*]] unwind label [[BB5:%.*]]
; CHECK: bb4:
; CHECK-NEXT: ret i32 0
; CHECK: bb5:
-; CHECK-NEXT: [[TMP27:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: [[INST27:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: catch ptr @_ZTIi
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[TMP21:%.*]], i32 1, i32 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP35]], align 4
-; CHECK-NEXT: ret i32 [[TMP25]]
+; CHECK-NEXT: [[INST35:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[IDX3:%.*]], i32 1, i32 1
+; CHECK-NEXT: [[INST25:%.*]] = load i32, ptr [[INST35]], align 4
+; CHECK-NEXT: ret i32 [[INST25]]
;
bb:
- br i1 %tmp4, label %bb1, label %bb2
+ br i1 %c, label %bb1, label %bb2
bb1:
- %tmp1 = getelementptr inbounds %struct3, ptr %dm, i64 %tmp19, i32 1
- store i32 0, ptr %tmp1, align 4
+ %inst1 = getelementptr inbounds %struct3, ptr %dm, i64 %idx1, i32 1
+ store i32 0, ptr %inst1, align 4
br label %bb3
bb2:
- %tmp2 = getelementptr inbounds %struct3, ptr %dm, i64 %tmp20, i32 1
- %tmp12 = getelementptr inbounds %struct4, ptr %tmp2, i64 0, i32 0, i32 1
- store i32 0, ptr %tmp12, align 4
+ %inst2 = getelementptr inbounds %struct3, ptr %dm, i64 %idx2, i32 1
+ %inst12 = getelementptr inbounds %struct4, ptr %inst2, i64 0, i32 0, i32 1
+ store i32 0, ptr %inst12, align 4
br label %bb3
bb3:
- %phi = phi ptr [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
- %tmp22 = invoke i32 @foo1(i32 11) to label %bb4 unwind label %bb5
+ %phi = phi ptr [ %inst1, %bb1 ], [ %inst2, %bb2 ]
+ %inst22 = invoke i32 @foo1(i32 11) to label %bb4 unwind label %bb5
bb4:
ret i32 0
bb5:
- %tmp27 = landingpad { ptr, i32 } catch ptr @_ZTIi
- %tmp34 = getelementptr inbounds %struct4, ptr %phi, i64 %tmp21, i32 1
- %tmp35 = getelementptr inbounds %struct2, ptr %tmp34, i64 0, i32 1
- %tmp25 = load i32, ptr %tmp35, align 4
- ret i32 %tmp25
+ %inst27 = landingpad { ptr, i32 } catch ptr @_ZTIi
+ %inst34 = getelementptr inbounds %struct4, ptr %phi, i64 %idx3, i32 1
+ %inst35 = getelementptr inbounds %struct2, ptr %inst34, i64 0, i32 1
+ %inst25 = load i32, ptr %inst35, align 4
+ ret i32 %inst25
}
@_ZTIi = external constant ptr
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index bb0a94c..55b5b5e 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -356,7 +356,7 @@ define i1 @test13_i16(i16 %X, ptr %P) {
define i1 @test13_i128(i128 %X, ptr %P) {
; CHECK-LABEL: @test13_i128(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[X:%.*]] to i64
; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[TMP1]], -1
; CHECK-NEXT: ret i1 [[C]]
;
@@ -412,7 +412,7 @@ define ptr @test_index_canon_inbounds(ptr %X, i32 %Idx) {
define ptr @test_index_canon_nusw_nuw(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon_nusw_nuw(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
; CHECK-NEXT: [[R:%.*]] = getelementptr nusw nuw i32, ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
@@ -568,7 +568,7 @@ define i32 @test20(ptr %P, i32 %A, i32 %B) {
define i32 @test20_as1(ptr addrspace(1) %P, i32 %A, i32 %B) {
; CHECK-LABEL: @test20_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i32 [[A:%.*]] to i16
; CHECK-NEXT: [[T6:%.*]] = icmp eq i16 [[TMP1]], 0
; CHECK-NEXT: [[T7:%.*]] = zext i1 [[T6]] to i32
; CHECK-NEXT: ret i32 [[T7]]
@@ -1380,7 +1380,7 @@ define ptr @gep_of_gep_multiuse_var_and_var(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_of_gep_multiuse_var_and_var(
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [4 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
; CHECK-NEXT: call void @use(ptr [[GEP1]])
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i32], ptr [[P]], i64 [[IDX]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr [4 x i32], ptr [[GEP1]], i64 0, i64 [[IDX2:%.*]]
; CHECK-NEXT: ret ptr [[GEP2]]
;
%gep1 = getelementptr [4 x i32], ptr %p, i64 %idx
@@ -1922,8 +1922,9 @@ define ptr @gep_merge_nusw(ptr %p, i64 %x, i64 %y) {
define ptr @gep_merge_nuw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nuw_add_zero(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]], i64 [[IDX2:%.*]]
-; CHECK-NEXT: ret ptr [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw [2 x i32], ptr [[GEP_SPLIT:%.*]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw [2 x i32], ptr [[GEP]], i64 0, i64 [[IDX3:%.*]]
+; CHECK-NEXT: ret ptr [[GEP1]]
;
%gep1 = getelementptr nuw [2 x i32], ptr %p, i64 %idx
%gep = getelementptr nuw [2 x i32], ptr %gep1, i64 0, i64 %idx2
@@ -1935,7 +1936,8 @@ define ptr @gep_merge_nuw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; after the merge.
define ptr @gep_merge_nusw_add_zero(ptr %p, i64 %idx, i64 %idx2) {
; CHECK-LABEL: @gep_merge_nusw_add_zero(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]], i64 [[IDX2:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw [2 x i32], ptr [[P:%.*]], i64 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw [2 x i32], ptr [[GEP1]], i64 0, i64 [[IDX2:%.*]]
; CHECK-NEXT: ret ptr [[GEP]]
;
%gep1 = getelementptr nusw [2 x i32], ptr %p, i64 %idx
@@ -1976,4 +1978,94 @@ define ptr @gep_merge_nusw_const(ptr %p, i64 %idx, i64 %idx2) {
ret ptr %gep
}
+define ptr @gep_index_trunc_nothing(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_inbounds(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_inbounds(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr inbounds i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nothing(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
!0 = !{!"branch_weights", i32 2, i32 10}
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index 1296dc6..f873551 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -39,9 +39,9 @@ define i1 @test59_as1(ptr addrspace(1) %foo) {
define i1 @test60(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i32
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -53,9 +53,9 @@ define i1 @test60(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -69,7 +69,7 @@ define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[I_TR]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index aede844..1385dc3 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -286,8 +286,7 @@ define i1 @PR8882(i64 %i) {
define i1 @test24_as1(i64 %i) {
; CHECK-LABEL: @test24_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[I:%.*]], 65535
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 1000
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1:%.*]], 1000
; CHECK-NEXT: ret i1 [[CMP]]
;
%p1 = getelementptr inbounds i32, ptr addrspace(1) @X_as1, i64 %i
@@ -449,9 +448,9 @@ define i1 @test_gep_eq_no_inbounds(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -686,13 +685,13 @@ define i1 @test_scalable_ij(ptr %foo, i64 %i, i64 %j) {
define i1 @gep_nuw(ptr %p, i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @gep_nuw(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[A:%.*]], 2
-; CHECK-NEXT: [[GEP1_IDX1:%.*]] = shl nuw i64 [[B:%.*]], 1
-; CHECK-NEXT: [[GEP1_OFFS:%.*]] = add nuw i64 [[GEP1_IDX]], [[GEP1_IDX1]]
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = shl nuw i64 [[C:%.*]], 3
-; CHECK-NEXT: [[GEP2_IDX2:%.*]] = shl nuw i64 [[D:%.*]], 2
-; CHECK-NEXT: [[GEP2_OFFS:%.*]] = add nuw i64 [[GEP2_IDX]], [[GEP2_IDX2]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[GEP1_OFFS]], [[GEP2_OFFS]]
+; CHECK-NEXT: [[GEP1_SPLIT_IDX:%.*]] = shl nuw i64 [[A:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[B:%.*]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw i64 [[GEP1_SPLIT_IDX]], [[GEP1_IDX]]
+; CHECK-NEXT: [[GEP2_SPLIT_IDX:%.*]] = shl nuw i64 [[C:%.*]], 3
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = shl nuw i64 [[D:%.*]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw i64 [[GEP2_SPLIT_IDX]], [[GEP2_IDX]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%gep1 = getelementptr nuw [2 x i16], ptr %p, i64 %a, i64 %b
@@ -703,13 +702,13 @@ define i1 @gep_nuw(ptr %p, i64 %a, i64 %b, i64 %c, i64 %d) {
define i1 @gep_nusw(ptr %p, i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @gep_nusw(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[A:%.*]], 2
-; CHECK-NEXT: [[GEP1_IDX1:%.*]] = shl nsw i64 [[B:%.*]], 1
-; CHECK-NEXT: [[GEP1_OFFS:%.*]] = add nsw i64 [[GEP1_IDX]], [[GEP1_IDX1]]
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = shl nsw i64 [[C:%.*]], 3
-; CHECK-NEXT: [[GEP2_IDX2:%.*]] = shl nsw i64 [[D:%.*]], 2
-; CHECK-NEXT: [[GEP2_OFFS:%.*]] = add nsw i64 [[GEP2_IDX]], [[GEP2_IDX2]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[GEP1_OFFS]], [[GEP2_OFFS]]
+; CHECK-NEXT: [[GEP1_SPLIT_IDX:%.*]] = shl nsw i64 [[A:%.*]], 2
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[B:%.*]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[GEP1_SPLIT_IDX]], [[GEP1_IDX]]
+; CHECK-NEXT: [[GEP2_SPLIT_IDX:%.*]] = shl nsw i64 [[C:%.*]], 3
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = shl nsw i64 [[D:%.*]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[GEP2_SPLIT_IDX]], [[GEP2_IDX]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%gep1 = getelementptr nusw [2 x i16], ptr %p, i64 %a, i64 %b
diff --git a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
index 53c9736..07486ff 100644
--- a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
+++ b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
@@ -376,7 +376,7 @@ define i1 @test8(ptr %in, i64 %offset) {
; CHECK-NEXT: [[LD:%.*]] = load i64, ptr [[IN:%.*]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[CASTI8:%.*]] = inttoptr i32 [[TMP0]] to ptr
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[OFFSET:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[OFFSET:%.*]] to i32
; CHECK-NEXT: [[GEPI8:%.*]] = getelementptr inbounds i8, ptr [[CASTI8]], i32 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[PTRCAST:%.*]] = inttoptr i32 [[TMP2]] to ptr
diff --git a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
index c05cca9..ac44e6c 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
@@ -261,14 +261,11 @@ define i8 @knownbits_umax_select_test() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[CONTAIN:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[COND0:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[CONTAIN]] = call i8 @llvm.umax.i8(i8 [[INDVAR]], i8 1)
; CHECK-NEXT: [[COND1:%.*]] = call i1 @cond()
; CHECK-NEXT: br i1 [[COND1]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[BOOL:%.*]] = and i8 [[CONTAIN]], 1
-; CHECK-NEXT: ret i8 [[BOOL]]
+; CHECK-NEXT: ret i8 1
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 12be81b..df34e7d 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -215,10 +215,7 @@ define i1 @test10_struct(i32 %x) {
define i1 @test10_struct_noinbounds(i32 %x) {
; CHECK-LABEL: @test10_struct_noinbounds(
-; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[X:%.*]], i32 0
-; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 9
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: ret i1 false
;
%p = getelementptr %Foo, ptr @GS, i32 %x, i32 0
%q = load i32, ptr %p
@@ -252,11 +249,7 @@ define i1 @test10_struct_i64(i64 %x){
define i1 @test10_struct_noinbounds_i16(i16 %x) {
; CHECK-LABEL: @test10_struct_noinbounds_i16(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
-; CHECK-NEXT: [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[TMP1]], i32 0
-; CHECK-NEXT: [[Q:%.*]] = load i32, ptr [[P]], align 4
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Q]], 0
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: ret i1 false
;
%p = getelementptr %Foo, ptr @GS, i16 %x, i32 0
%q = load i32, ptr %p
@@ -300,8 +293,7 @@ define i1 @test10_struct_arr_i16(i16 %x) {
define i1 @test10_struct_arr_i64(i64 %x) {
; CHECK-LABEL: @test10_struct_arr_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 4294967295
-; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
+; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i64 0, i64 %x, i32 2
@@ -338,7 +330,7 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
define i1 @pr93017(i64 %idx) {
; CHECK-LABEL: @pr93017(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[IDX:%.*]] to i32
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr @table, i32 0, i32 [[TMP1]]
; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null
diff --git a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
index 098f2ee..2cdc73e 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -33,7 +33,8 @@ define <2 x i64> @hem_2d(i32 %i, i32 %j) {
; CHECK-LABEL: @hem_2d(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr [[T_SPLIT]], i64 0, i64 [[TMP2]]
; CHECK-NEXT: [[L:%.*]] = load <2 x i64>, ptr [[T]], align 1
; CHECK-NEXT: ret <2 x i64> [[L]]
;
@@ -90,7 +91,8 @@ define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
; CHECK-LABEL: @hem_2d_store(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
-; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
+; CHECK-NEXT: [[T_SPLIT:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]]
+; CHECK-NEXT: [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr [[T_SPLIT]], i64 0, i64 [[TMP2]]
; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/or-packed-int-vecs.ll b/llvm/test/Transforms/InstCombine/or-packed-int-vecs.ll
new file mode 100644
index 0000000..9391fb5
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/or-packed-int-vecs.ll
@@ -0,0 +1,926 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt %s -passes=instcombine -data-layout="E" -S | FileCheck %s --check-prefixes=CHECK,CHECK-BE
+; RUN: opt %s -passes=instcombine -data-layout="e" -S | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+
+define i32 @bitcast.v2i.le(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @bitcast.v2i.le(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <4 x i8> [[V]], i64 2
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-BE-NEXT: [[X_2:%.*]] = or disjoint i32 [[X_1]], [[S_2]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_2]], [[S_3]]
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @bitcast.v2i.le(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[V]] to i32
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 0
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 1
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i8> %v, i64 2
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+ %x.2 = or i32 %x.1, %s.2
+
+ %v.3 = extractelement <4 x i8> %v, i64 3
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.2, %s.3
+
+ ret i32 %x.3
+}
+
+define i32 @bitcast.v2i.be(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @bitcast.v2i.be(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[V]] to i32
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @bitcast.v2i.be(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 2
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i32 [[X_1]], [[S_2]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 3
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 2
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i8> %v, i64 1
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+ %x.2 = or i32 %x.1, %s.2
+
+ %v.3 = extractelement <4 x i8> %v, i64 0
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.2, %s.3
+
+ ret i32 %x.3
+}
+
+define i64 @bitcast.v2i.le.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @bitcast.v2i.le.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <4 x i16> [[V]], i64 2
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-BE-NEXT: [[X_2:%.*]] = or disjoint i64 [[X_1]], [[S_2]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @bitcast.v2i.le.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3:%.*]] = bitcast <4 x i16> [[V]] to i64
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 0
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 1
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i16> %v, i64 2
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %x.1, %s.2
+
+ %v.3 = extractelement <4 x i16> %v, i64 3
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define i64 @bitcast.v2i.be.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @bitcast.v2i.be.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i16> [[V]] to i64
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @bitcast.v2i.be.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 2
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i64 [[X_1]], [[S_2]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 3
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 2
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i16> %v, i64 1
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %x.1, %s.2
+
+ %v.3 = extractelement <4 x i16> %v, i64 0
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define i32 @bitcast.v2i.le.tree(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @bitcast.v2i.le.tree(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <4 x i8> [[V]], i64 2
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i32 [[S_2]], [[S_3]]
+; CHECK-BE-NEXT: [[X:%.*]] = or disjoint i32 [[X_1]], [[X_3]]
+; CHECK-BE-NEXT: ret i32 [[X]]
+;
+; CHECK-LE-LABEL: define i32 @bitcast.v2i.le.tree(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X:%.*]] = bitcast <4 x i8> [[V]] to i32
+; CHECK-LE-NEXT: ret i32 [[X]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 0
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 1
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i8> %v, i64 2
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+
+ %v.3 = extractelement <4 x i8> %v, i64 3
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %s.2, %s.3
+
+ %x = or i32 %x.1, %x.3
+
+ ret i32 %x
+}
+
+define i32 @bitcast.v2i.be.tree(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @bitcast.v2i.be.tree(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X:%.*]] = bitcast <4 x i8> [[V]] to i32
+; CHECK-BE-NEXT: ret i32 [[X]]
+;
+; CHECK-LE-LABEL: define i32 @bitcast.v2i.be.tree(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 2
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i32 [[S_2]], [[S_3]]
+; CHECK-LE-NEXT: [[X:%.*]] = or disjoint i32 [[X_1]], [[X_3]]
+; CHECK-LE-NEXT: ret i32 [[X]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 3
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 2
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i8> %v, i64 1
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+
+ %v.3 = extractelement <4 x i8> %v, i64 0
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %s.2, %s.3
+
+ %x = or i32 %x.1, %x.3
+
+ ret i32 %x
+}
+
+define i64 @bitcast.v2i.le.tree.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @bitcast.v2i.le.tree.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <4 x i16> [[V]], i64 2
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i64 [[S_2]], [[S_3]]
+; CHECK-BE-NEXT: [[X:%.*]] = or disjoint i64 [[X_1]], [[X_3]]
+; CHECK-BE-NEXT: ret i64 [[X]]
+;
+; CHECK-LE-LABEL: define i64 @bitcast.v2i.le.tree.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X:%.*]] = bitcast <4 x i16> [[V]] to i64
+; CHECK-LE-NEXT: ret i64 [[X]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 0
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 1
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i16> %v, i64 2
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+
+ %v.3 = extractelement <4 x i16> %v, i64 3
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %s.2, %s.3
+
+ %x = or i64 %x.1, %x.3
+
+ ret i64 %x
+}
+
+define i64 @bitcast.v2i.be.tree.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @bitcast.v2i.be.tree.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X:%.*]] = bitcast <4 x i16> [[V]] to i64
+; CHECK-BE-NEXT: ret i64 [[X]]
+;
+; CHECK-LE-LABEL: define i64 @bitcast.v2i.be.tree.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 2
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i64 [[S_2]], [[S_3]]
+; CHECK-LE-NEXT: [[X:%.*]] = or disjoint i64 [[X_1]], [[X_3]]
+; CHECK-LE-NEXT: ret i64 [[X]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 3
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 2
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <4 x i16> %v, i64 1
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+
+ %v.3 = extractelement <4 x i16> %v, i64 0
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %s.2, %s.3
+
+ %x = or i64 %x.1, %x.3
+
+ ret i64 %x
+}
+
+define i32 @extract.le.i32(<8 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @extract.le.i32(
+; CHECK-BE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <8 x i8> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <8 x i8> [[V]], i64 4
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <8 x i8> [[V]], i64 5
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-BE-NEXT: [[X_2:%.*]] = or disjoint i32 [[X_1]], [[S_2]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <8 x i8> [[V]], i64 6
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_2]], [[S_3]]
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @extract.le.i32(
+; CHECK-LE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-LE-NEXT: [[X_3_V_BC:%.*]] = bitcast <8 x i8> [[X_3_V_EXTRACT]] to <2 x i32>
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT1:%.*]] = extractelement <2 x i32> [[X_3_V_BC]], i64 0
+; CHECK-LE-NEXT: ret i32 [[X_3_V_EXTRACT1]]
+;
+ %v.0 = extractelement <8 x i8> %v, i64 3
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <8 x i8> %v, i64 4
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <8 x i8> %v, i64 5
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+ %x.2 = or i32 %x.1, %s.2
+
+ %v.3 = extractelement <8 x i8> %v, i64 6
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.2, %s.3
+
+ ret i32 %x.3
+}
+
+define i32 @extract.be.i32(<8 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @extract.be.i32(
+; CHECK-BE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V_EXTRACT:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-BE-NEXT: [[X_3_V_BC:%.*]] = bitcast <8 x i8> [[X_3_V_EXTRACT]] to <2 x i32>
+; CHECK-BE-NEXT: [[X_3_V_EXTRACT1:%.*]] = extractelement <2 x i32> [[X_3_V_BC]], i64 0
+; CHECK-BE-NEXT: ret i32 [[X_3_V_EXTRACT1]]
+;
+; CHECK-LE-LABEL: define i32 @extract.be.i32(
+; CHECK-LE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <8 x i8> [[V]], i64 6
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <8 x i8> [[V]], i64 5
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <8 x i8> [[V]], i64 4
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i32 [[X_1]], [[S_2]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <8 x i8> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <8 x i8> %v, i64 6
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <8 x i8> %v, i64 5
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.2 = extractelement <8 x i8> %v, i64 4
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+ %x.2 = or i32 %x.1, %s.2
+
+ %v.3 = extractelement <8 x i8> %v, i64 3
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.2, %s.3
+
+ ret i32 %x.3
+}
+
+define i64 @extract.le.i64(<8 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @extract.le.i64(
+; CHECK-BE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <8 x i16> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <8 x i16> [[V]], i64 4
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <8 x i16> [[V]], i64 5
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-BE-NEXT: [[X_2:%.*]] = or disjoint i64 [[X_1]], [[S_2]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <8 x i16> [[V]], i64 6
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @extract.le.i64(
+; CHECK-LE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT:%.*]] = shufflevector <8 x i16> [[V]], <8 x i16> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-LE-NEXT: [[X_3_V_BC:%.*]] = bitcast <8 x i16> [[X_3_V_EXTRACT]] to <2 x i64>
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT1:%.*]] = extractelement <2 x i64> [[X_3_V_BC]], i64 0
+; CHECK-LE-NEXT: ret i64 [[X_3_V_EXTRACT1]]
+;
+ %v.0 = extractelement <8 x i16> %v, i64 3
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <8 x i16> %v, i64 4
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <8 x i16> %v, i64 5
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %x.1, %s.2
+
+ %v.3 = extractelement <8 x i16> %v, i64 6
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define i64 @extract.be.i64(<8 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @extract.be.i64(
+; CHECK-BE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V_EXTRACT:%.*]] = shufflevector <8 x i16> [[V]], <8 x i16> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-BE-NEXT: [[X_3_V_BC:%.*]] = bitcast <8 x i16> [[X_3_V_EXTRACT]] to <2 x i64>
+; CHECK-BE-NEXT: [[X_3_V_EXTRACT1:%.*]] = extractelement <2 x i64> [[X_3_V_BC]], i64 0
+; CHECK-BE-NEXT: ret i64 [[X_3_V_EXTRACT1]]
+;
+; CHECK-LE-LABEL: define i64 @extract.be.i64(
+; CHECK-LE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <8 x i16> [[V]], i64 6
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <8 x i16> [[V]], i64 5
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <8 x i16> [[V]], i64 4
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i64 [[X_1]], [[S_2]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <8 x i16> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <8 x i16> %v, i64 6
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <8 x i16> %v, i64 5
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <8 x i16> %v, i64 4
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %x.1, %s.2
+
+ %v.3 = extractelement <8 x i16> %v, i64 3
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define i32 @partial.le(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @partial.le(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_1]], [[S_3]]
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @partial.le(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V1:%.*]] = insertelement <4 x i8> [[V]], i8 0, i64 2
+; CHECK-LE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[X_3_V1]] to i32
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 0
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 1
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.3 = extractelement <4 x i8> %v, i64 3
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.1, %s.3
+
+ ret i32 %x.3
+}
+
+define i32 @partial.be(<4 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @partial.be(
+; CHECK-BE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V1:%.*]] = insertelement <4 x i8> [[V]], i8 0, i64 2
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[X_3_V1]] to i32
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @partial.be(
+; CHECK-LE-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i8> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i8> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 16
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i8> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_1]], [[S_3]]
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <4 x i8> %v, i64 3
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <4 x i8> %v, i64 1
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 16
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.3 = extractelement <4 x i8> %v, i64 0
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.1, %s.3
+
+ ret i32 %x.3
+}
+
+
+define i64 @partial.le.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @partial.le.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_1]], [[S_3]]
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @partial.le.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V1:%.*]] = insertelement <4 x i16> [[V]], i16 0, i64 2
+; CHECK-LE-NEXT: [[X_3:%.*]] = bitcast <4 x i16> [[X_3_V1]] to i64
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 0
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 1
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.3 = extractelement <4 x i16> %v, i64 3
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.1, %s.3
+
+ ret i64 %x.3
+}
+
+define i64 @partial.be.i16(<4 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @partial.be.i16(
+; CHECK-BE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V1:%.*]] = insertelement <4 x i16> [[V]], i16 0, i64 2
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i16> [[X_3_V1]] to i64
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @partial.be.i16(
+; CHECK-LE-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <4 x i16> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-LE-NEXT: [[V_1:%.*]] = extractelement <4 x i16> [[V]], i64 1
+; CHECK-LE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-LE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 32
+; CHECK-LE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <4 x i16> [[V]], i64 0
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_1]], [[S_3]]
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <4 x i16> %v, i64 3
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <4 x i16> %v, i64 1
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 32
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.3 = extractelement <4 x i16> %v, i64 0
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.1, %s.3
+
+ ret i64 %x.3
+}
+
+define i32 @partial.extract.le.i32(<8 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @partial.extract.le.i32(
+; CHECK-BE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <8 x i8> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <8 x i8> [[V]], i64 4
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i8 [[V_1]] to i32
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i32 [[Z_1]], 8
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i32 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <8 x i8> [[V]], i64 6
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_1]], [[S_3]]
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @partial.extract.le.i32(
+; CHECK-LE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> <i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <4 x i32> <i32 3, i32 4, i32 8, i32 6>
+; CHECK-LE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[X_3_V]] to i32
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <8 x i8> %v, i64 3
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.1 = extractelement <8 x i8> %v, i64 4
+ %z.1 = zext i8 %v.1 to i32
+ %s.1 = shl i32 %z.1, 8
+ %x.1 = or i32 %z.0, %s.1
+
+ %v.3 = extractelement <8 x i8> %v, i64 6
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.1, %s.3
+
+ ret i32 %x.3
+}
+
+define i32 @partial.extract.be.i32(<8 x i8> %v) {
+; CHECK-BE-LABEL: define i32 @partial.extract.be.i32(
+; CHECK-BE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> <i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <4 x i32> <i32 3, i32 4, i32 8, i32 6>
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i8> [[X_3_V]] to i32
+; CHECK-BE-NEXT: ret i32 [[X_3]]
+;
+; CHECK-LE-LABEL: define i32 @partial.extract.be.i32(
+; CHECK-LE-SAME: <8 x i8> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <8 x i8> [[V]], i64 6
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i8 [[V_0]] to i32
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <8 x i8> [[V]], i64 4
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i8 [[V_2]] to i32
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i32 [[Z_2]], 16
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i32 [[S_2]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <8 x i8> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i8 [[V_3]] to i32
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i32 [[Z_3]], 24
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i32 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i32 [[X_3]]
+;
+ %v.0 = extractelement <8 x i8> %v, i64 6
+ %z.0 = zext i8 %v.0 to i32
+
+ %v.2 = extractelement <8 x i8> %v, i64 4
+ %z.2 = zext i8 %v.2 to i32
+ %s.2 = shl i32 %z.2, 16
+ %x.2 = or i32 %z.0, %s.2
+
+ %v.3 = extractelement <8 x i8> %v, i64 3
+ %z.3 = zext i8 %v.3 to i32
+ %s.3 = shl i32 %z.3, 24
+ %x.3 = or i32 %x.2, %s.3
+
+ ret i32 %x.3
+}
+
+define i64 @partial.extract.le.i64(<8 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @partial.extract.le.i64(
+; CHECK-BE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[V_0:%.*]] = extractelement <8 x i16> [[V]], i64 3
+; CHECK-BE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-BE-NEXT: [[V_1:%.*]] = extractelement <8 x i16> [[V]], i64 4
+; CHECK-BE-NEXT: [[Z_1:%.*]] = zext i16 [[V_1]] to i64
+; CHECK-BE-NEXT: [[S_1:%.*]] = shl nuw nsw i64 [[Z_1]], 16
+; CHECK-BE-NEXT: [[X_1:%.*]] = or disjoint i64 [[S_1]], [[Z_0]]
+; CHECK-BE-NEXT: [[V_2:%.*]] = extractelement <8 x i16> [[V]], i64 5
+; CHECK-BE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-BE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-BE-NEXT: [[X_2:%.*]] = or disjoint i64 [[X_1]], [[S_2]]
+; CHECK-BE-NEXT: [[V_3:%.*]] = extractelement <8 x i16> [[V]], i64 6
+; CHECK-BE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-BE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-BE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @partial.extract.le.i64(
+; CHECK-LE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT:%.*]] = shufflevector <8 x i16> [[V]], <8 x i16> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-LE-NEXT: [[X_3_V_BC:%.*]] = bitcast <8 x i16> [[X_3_V_EXTRACT]] to <2 x i64>
+; CHECK-LE-NEXT: [[X_3_V_EXTRACT1:%.*]] = extractelement <2 x i64> [[X_3_V_BC]], i64 0
+; CHECK-LE-NEXT: ret i64 [[X_3_V_EXTRACT1]]
+;
+ %v.0 = extractelement <8 x i16> %v, i64 3
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.1 = extractelement <8 x i16> %v, i64 4
+ %z.1 = zext i16 %v.1 to i64
+ %s.1 = shl i64 %z.1, 16
+ %x.1 = or i64 %z.0, %s.1
+
+ %v.2 = extractelement <8 x i16> %v, i64 5
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %x.1, %s.2
+
+ %v.3 = extractelement <8 x i16> %v, i64 6
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define i64 @partial.extract.be.i64(<8 x i16> %v) {
+; CHECK-BE-LABEL: define i64 @partial.extract.be.i64(
+; CHECK-BE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-BE-NEXT: [[X_3_V:%.*]] = shufflevector <8 x i16> [[V]], <8 x i16> <i16 0, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison>, <4 x i32> <i32 3, i32 4, i32 8, i32 6>
+; CHECK-BE-NEXT: [[X_3:%.*]] = bitcast <4 x i16> [[X_3_V]] to i64
+; CHECK-BE-NEXT: ret i64 [[X_3]]
+;
+; CHECK-LE-LABEL: define i64 @partial.extract.be.i64(
+; CHECK-LE-SAME: <8 x i16> [[V:%.*]]) {
+; CHECK-LE-NEXT: [[V_0:%.*]] = extractelement <8 x i16> [[V]], i64 6
+; CHECK-LE-NEXT: [[Z_0:%.*]] = zext i16 [[V_0]] to i64
+; CHECK-LE-NEXT: [[V_2:%.*]] = extractelement <8 x i16> [[V]], i64 4
+; CHECK-LE-NEXT: [[Z_2:%.*]] = zext i16 [[V_2]] to i64
+; CHECK-LE-NEXT: [[S_2:%.*]] = shl nuw nsw i64 [[Z_2]], 32
+; CHECK-LE-NEXT: [[X_2:%.*]] = or disjoint i64 [[S_2]], [[Z_0]]
+; CHECK-LE-NEXT: [[V_3:%.*]] = extractelement <8 x i16> [[V]], i64 3
+; CHECK-LE-NEXT: [[Z_3:%.*]] = zext i16 [[V_3]] to i64
+; CHECK-LE-NEXT: [[S_3:%.*]] = shl nuw i64 [[Z_3]], 48
+; CHECK-LE-NEXT: [[X_3:%.*]] = or disjoint i64 [[X_2]], [[S_3]]
+; CHECK-LE-NEXT: ret i64 [[X_3]]
+;
+ %v.0 = extractelement <8 x i16> %v, i64 6
+ %z.0 = zext i16 %v.0 to i64
+
+ %v.2 = extractelement <8 x i16> %v, i64 4
+ %z.2 = zext i16 %v.2 to i64
+ %s.2 = shl i64 %z.2, 32
+ %x.2 = or i64 %z.0, %s.2
+
+ %v.3 = extractelement <8 x i16> %v, i64 3
+ %z.3 = zext i16 %v.3 to i64
+ %s.3 = shl i64 %z.3, 48
+ %x.3 = or i64 %x.2, %s.3
+
+ ret i64 %x.3
+}
+
+define <2 x i16> @shufflecast.v2v(<4 x i8> %v) {
+; CHECK-LABEL: define <2 x i16> @shufflecast.v2v(
+; CHECK-SAME: <4 x i8> [[V:%.*]]) {
+; CHECK-NEXT: [[W_3:%.*]] = bitcast <4 x i8> [[V]] to <2 x i16>
+; CHECK-NEXT: ret <2 x i16> [[W_3]]
+;
+ %v.0 = shufflevector <4 x i8> %v, <4 x i8> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+ %c.0 = bitcast <4 x i8> %v.0 to <2 x i16>
+
+ %v.1 = shufflevector <4 x i8> %v, <4 x i8> zeroinitializer, <4 x i32> <i32 4, i32 1, i32 4, i32 4>
+ %c.1 = bitcast <4 x i8> %v.1 to <2 x i16>
+ %w.1 = or <2 x i16> %c.0, %c.1
+
+ %v.2 = shufflevector <4 x i8> %v, <4 x i8> zeroinitializer, <4 x i32> <i32 4, i32 4, i32 2, i32 4>
+ %c.2 = bitcast <4 x i8> %v.2 to <2 x i16>
+ %w.2 = or <2 x i16> %w.1, %c.2
+
+ %v.3 = shufflevector <4 x i8> %v, <4 x i8> zeroinitializer, <4 x i32> <i32 4, i32 4, i32 4, i32 3>
+ %c.3 = bitcast <4 x i8> %v.3 to <2 x i16>
+ %w.3 = or <2 x i16> %w.2, %c.3
+
+ ret <2 x i16> %w.3
+}
+
+define <2 x i32> @shufflecast.v2v.i16(<4 x i16> %v) {
+; CHECK-LABEL: define <2 x i32> @shufflecast.v2v.i16(
+; CHECK-SAME: <4 x i16> [[V:%.*]]) {
+; CHECK-NEXT: [[W_3:%.*]] = bitcast <4 x i16> [[V]] to <2 x i32>
+; CHECK-NEXT: ret <2 x i32> [[W_3]]
+;
+ %v.0 = shufflevector <4 x i16> %v, <4 x i16> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+ %c.0 = bitcast <4 x i16> %v.0 to <2 x i32>
+
+ %v.1 = shufflevector <4 x i16> %v, <4 x i16> zeroinitializer, <4 x i32> <i32 4, i32 1, i32 4, i32 4>
+ %c.1 = bitcast <4 x i16> %v.1 to <2 x i32>
+ %w.1 = or <2 x i32> %c.0, %c.1
+
+ %v.2 = shufflevector <4 x i16> %v, <4 x i16> zeroinitializer, <4 x i32> <i32 4, i32 4, i32 2, i32 4>
+ %c.2 = bitcast <4 x i16> %v.2 to <2 x i32>
+ %w.2 = or <2 x i32> %w.1, %c.2
+
+ %v.3 = shufflevector <4 x i16> %v, <4 x i16> zeroinitializer, <4 x i32> <i32 4, i32 4, i32 4, i32 3>
+ %c.3 = bitcast <4 x i16> %v.3 to <2 x i32>
+ %w.3 = or <2 x i32> %w.2, %c.3
+
+ ret <2 x i32> %w.3
+}
+
+define i32 @bitcast.v2i.half(<2 x half> %v) {
+; CHECK-LABEL: define i32 @bitcast.v2i.half(
+; CHECK-SAME: <2 x half> [[V:%.*]]) {
+; CHECK-NEXT: [[X:%.*]] = bitcast <2 x half> [[V]] to i32
+; CHECK-NEXT: ret i32 [[X]]
+;
+ %v.0 = insertelement <2 x half> %v, half 0.0, i64 1
+ %x.0 = bitcast <2 x half> %v.0 to i32
+
+ %v.1 = insertelement <2 x half> %v, half 0.0, i64 0
+ %x.1 = bitcast <2 x half> %v.1 to i32
+
+ %x = or i32 %x.0, %x.1
+ ret i32 %x
+}
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 4756b4f..d9f729e 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -2998,3 +2998,30 @@ join:
%cmp = icmp eq i32 %phi, 0
ret i1 %cmp
}
+
+declare void @may_exit()
+
+define i32 @intrinsic_over_phi_noundef(i1 %c, i1 %c2, i32 %a) {
+; CHECK-LABEL: @intrinsic_over_phi_noundef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.umax.i32(i32 [[A:%.*]], i32 1)
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP0]], [[IF]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: call void @may_exit()
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %join
+
+if:
+ br label %join
+
+join:
+ %phi = phi i32 [ %a, %if ], [ 0, %entry ]
+ call void @may_exit()
+ %umax = call noundef i32 @llvm.umax(i32 noundef %phi, i32 1)
+ ret i32 %umax
+}
diff --git a/llvm/test/Transforms/InstCombine/pr39908.ll b/llvm/test/Transforms/InstCombine/pr39908.ll
index 5d13a33..c36495d 100644
--- a/llvm/test/Transforms/InstCombine/pr39908.ll
+++ b/llvm/test/Transforms/InstCombine/pr39908.ll
@@ -19,7 +19,7 @@ define i1 @test(ptr %p, i32 %n) {
; Same test using 64-bit indices.
define i1 @test64(ptr %p, i64 %n) {
; CHECK-LABEL: @test64(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -32,7 +32,7 @@ define i1 @test64(ptr %p, i64 %n) {
; Here the offset overflows and is treated modulo 2^32. This is UB.
define i1 @test64_overflow(ptr %p, i64 %n) {
; CHECK-LABEL: @test64_overflow(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/pr58901.ll b/llvm/test/Transforms/InstCombine/pr58901.ll
index 1eea4b5..3d836aa 100644
--- a/llvm/test/Transforms/InstCombine/pr58901.ll
+++ b/llvm/test/Transforms/InstCombine/pr58901.ll
@@ -15,7 +15,8 @@ define ptr @f1(ptr %arg, i64 %arg1) {
define ptr @f2(ptr %arg, i64 %arg1) {
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[ARG:%.*]], i64 72
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [6 x i32], ptr [[TMP1]], i64 [[ARG1:%.*]], i64 [[ARG1]]
+; CHECK-NEXT: [[DOTSPLIT:%.*]] = getelementptr [6 x i32], ptr [[TMP1]], i64 [[ARG1:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [6 x i32], ptr [[DOTSPLIT]], i64 0, i64 [[ARG1]]
; CHECK-NEXT: ret ptr [[TMP2]]
;
%1 = getelementptr [6 x i32], ptr %arg, i64 3
diff --git a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
index c5282ce..17a9d54 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoint-nullgep.ll
@@ -548,10 +548,10 @@ define i64 @fold_ptrtoint_nested_array_two_vars(i64 %x, i64 %y) {
;
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_array_two_vars
; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
-; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[X]], 2
-; INSTCOMBINE-NEXT: [[PTR_IDX1:%.*]] = shl i64 [[Y]], 1
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add i64 [[PTR_IDX]], [[PTR_IDX1]]
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT_IDX:%.*]] = shl i64 [[X]], 2
+; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[Y]], 1
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_SPLIT_IDX]], [[PTR_IDX]]
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x i16], ptr addrspace(1) null, i64 %x, i64 %y
@@ -574,10 +574,10 @@ define i64 @fold_ptrtoint_nested_array_two_vars_plus_zero(i64 %x, i64 %y) {
;
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_array_two_vars_plus_zero
; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
-; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[X]], 3
-; INSTCOMBINE-NEXT: [[PTR_IDX1:%.*]] = shl i64 [[Y]], 2
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add i64 [[PTR_IDX]], [[PTR_IDX1]]
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT_IDX:%.*]] = shl i64 [[X]], 3
+; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[Y]], 2
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_SPLIT_IDX]], [[PTR_IDX]]
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x [2 x i16]], ptr addrspace(1) null, i64 %x, i64 %y, i64 0
%ret = ptrtoint ptr addrspace(1) %ptr to i64
@@ -599,11 +599,11 @@ define i64 @fold_ptrtoint_nested_array_two_vars_plus_const(i64 %x, i64 %y) {
;
; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_array_two_vars_plus_const
; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
-; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[X]], 3
-; INSTCOMBINE-NEXT: [[PTR_IDX1:%.*]] = shl i64 [[Y]], 2
-; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = add i64 [[PTR_IDX]], [[PTR_IDX1]]
-; INSTCOMBINE-NEXT: [[PTR_OFFS2:%.*]] = or disjoint i64 [[PTR_OFFS]], 2
-; INSTCOMBINE-NEXT: ret i64 [[PTR_OFFS2]]
+; INSTCOMBINE-NEXT: [[PTR_SPLIT_IDX:%.*]] = shl i64 [[X]], 3
+; INSTCOMBINE-NEXT: [[PTR_IDX:%.*]] = shl i64 [[Y]], 2
+; INSTCOMBINE-NEXT: [[PTR_OFFS:%.*]] = or disjoint i64 [[PTR_IDX]], 2
+; INSTCOMBINE-NEXT: [[RET:%.*]] = add i64 [[PTR_SPLIT_IDX]], [[PTR_OFFS]]
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x [2 x i16]], ptr addrspace(1) null, i64 %x, i64 %y, i64 1
%ret = ptrtoint ptr addrspace(1) %ptr to i64
@@ -612,12 +612,27 @@ define i64 @fold_ptrtoint_nested_array_two_vars_plus_const(i64 %x, i64 %y) {
; Negative test -- should not be folded since there are multiple GEP uses
define i64 @fold_ptrtoint_nested_nullgep_array_variable_multiple_uses(i64 %x, i64 %y) {
-; ALL-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_nullgep_array_variable_multiple_uses
-; ALL-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
-; ALL-NEXT: [[PTR:%.*]] = getelementptr [2 x i16], ptr addrspace(1) null, i64 [[X]], i64 [[Y]]
-; ALL-NEXT: call void @use_ptr(ptr addrspace(1) [[PTR]])
-; ALL-NEXT: [[RET:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; ALL-NEXT: ret i64 [[RET]]
+; LLPARSER-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_nullgep_array_variable_multiple_uses
+; LLPARSER-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
+; LLPARSER-NEXT: [[PTR:%.*]] = getelementptr [2 x i16], ptr addrspace(1) null, i64 [[X]], i64 [[Y]]
+; LLPARSER-NEXT: call void @use_ptr(ptr addrspace(1) [[PTR]])
+; LLPARSER-NEXT: [[RET:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; LLPARSER-NEXT: ret i64 [[RET]]
+;
+; INSTSIMPLIFY-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_nullgep_array_variable_multiple_uses
+; INSTSIMPLIFY-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
+; INSTSIMPLIFY-NEXT: [[PTR:%.*]] = getelementptr [2 x i16], ptr addrspace(1) null, i64 [[X]], i64 [[Y]]
+; INSTSIMPLIFY-NEXT: call void @use_ptr(ptr addrspace(1) [[PTR]])
+; INSTSIMPLIFY-NEXT: [[RET:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; INSTSIMPLIFY-NEXT: ret i64 [[RET]]
+;
+; INSTCOMBINE-LABEL: define {{[^@]+}}@fold_ptrtoint_nested_nullgep_array_variable_multiple_uses
+; INSTCOMBINE-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]]) {
+; INSTCOMBINE-NEXT: [[PTR_SPLIT:%.*]] = getelementptr [2 x i16], ptr addrspace(1) null, i64 [[X]]
+; INSTCOMBINE-NEXT: [[PTR:%.*]] = getelementptr [2 x i16], ptr addrspace(1) [[PTR_SPLIT]], i64 0, i64 [[Y]]
+; INSTCOMBINE-NEXT: call void @use_ptr(ptr addrspace(1) [[PTR]])
+; INSTCOMBINE-NEXT: [[RET:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; INSTCOMBINE-NEXT: ret i64 [[RET]]
;
%ptr = getelementptr [2 x i16], ptr addrspace(1) null, i64 %x, i64 %y
call void @use_ptr(ptr addrspace(1) %ptr)
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index c637481..86e586e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -8,12 +8,11 @@ define i8 @simple_recurrence_intrinsic_smax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX_ACC:%.*]] = phi i8 [ [[SMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX]] = call i8 @llvm.smax.i8(i8 [[SMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMAX:%.*]] = call i8 @llvm.smax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMAX]]
;
entry:
@@ -38,12 +37,11 @@ define i8 @simple_recurrence_intrinsic_smin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN_ACC:%.*]] = phi i8 [ [[SMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN]] = call i8 @llvm.smin.i8(i8 [[SMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMIN:%.*]] = call i8 @llvm.smin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMIN]]
;
entry:
@@ -68,12 +66,11 @@ define i8 @simple_recurrence_intrinsic_umax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMAX:%.*]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMAX]]
;
entry:
@@ -98,12 +95,11 @@ define i8 @simple_recurrence_intrinsic_umin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN_ACC:%.*]] = phi i8 [ [[UMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN]] = call i8 @llvm.umin.i8(i8 [[UMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMIN:%.*]] = call i8 @llvm.umin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMIN]]
;
entry:
@@ -128,12 +124,11 @@ define float @simple_recurrence_intrinsic_maxnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call float @llvm.maxnum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call float @llvm.maxnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -157,12 +152,11 @@ define float @simple_recurrence_intrinsic_minnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call float @llvm.minnum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call float @llvm.minnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
@@ -186,12 +180,11 @@ define float @simple_recurrence_intrinsic_maximum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -215,12 +208,11 @@ define float @simple_recurrence_intrinsic_minimum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/strcmp-3.ll b/llvm/test/Transforms/InstCombine/strcmp-3.ll
index 66910b4..2c4012b 100644
--- a/llvm/test/Transforms/InstCombine/strcmp-3.ll
+++ b/llvm/test/Transforms/InstCombine/strcmp-3.ll
@@ -25,7 +25,7 @@ define i32 @fold_strcmp_a5i0_a5i1_to_0() {
define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5i0_a5iI(
-; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]], i64 0
+; CHECK-NEXT: [[Q:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(4) @a5, ptr noundef nonnull dereferenceable(1) [[Q]])
; CHECK-NEXT: ret i32 [[CMP]]
;
@@ -40,7 +40,7 @@ define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
define i32 @call_strcmp_a5iI_a5i0(i64 %I) {
; CHECK-LABEL: @call_strcmp_a5iI_a5i0(
-; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]], i64 0
+; CHECK-NEXT: [[P:%.*]] = getelementptr [5 x [4 x i8]], ptr @a5, i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(1) [[P]], ptr noundef nonnull dereferenceable(4) @a5)
; CHECK-NEXT: ret i32 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 45e5686..0db5cbe 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -394,7 +394,7 @@ define i64 @negative_ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[A:%.*]] to i16
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i16 [[B_IDX]], -84
; CHECK-NEXT: ret i16 [[GEPDIFF]]
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
index 81ecd85..439b599 100644
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -1141,10 +1141,10 @@ define i64 @test59(ptr %foo, i64 %i) {
define i64 @test60(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[J:%.*]], 100
-; CHECK-NEXT: [[GEP1_OFFS:%.*]] = add nsw i64 [[GEP1_IDX]], [[I:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[FOO:%.*]], i64 [[GEP1_OFFS]]
-; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i64 [[GEP1_OFFS]], -4200
+; CHECK-NEXT: [[GEP1_SPLIT_IDX:%.*]] = mul nsw i64 [[J:%.*]], 100
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[GEP1_SPLIT_IDX]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[FOO:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i64 [[TMP1]], -4200
; CHECK-NEXT: store ptr [[GEP1]], ptr @dummy_global1, align 8
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
@@ -1160,10 +1160,10 @@ define i64 @test60(ptr %foo, i64 %i, i64 %j) {
define i64 @test60_nuw(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_nuw(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nuw i64 [[J:%.*]], 100
-; CHECK-NEXT: [[GEP1_OFFS:%.*]] = add nuw i64 [[GEP1_IDX]], [[I:%.*]]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i8, ptr [[FOO:%.*]], i64 [[GEP1_OFFS]]
-; CHECK-NEXT: [[GEPDIFF:%.*]] = add i64 [[GEP1_OFFS]], -4200
+; CHECK-NEXT: [[GEP1_SPLIT_IDX:%.*]] = mul nuw i64 [[J:%.*]], 100
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw i64 [[GEP1_SPLIT_IDX]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i8, ptr [[FOO:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[GEPDIFF:%.*]] = add i64 [[TMP1]], -4200
; CHECK-NEXT: store ptr [[GEP1]], ptr @dummy_global1, align 8
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
@@ -1178,10 +1178,10 @@ define i64 @test60_nuw(ptr %foo, i64 %i, i64 %j) {
define i64 @test61(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[J:%.*]], 100
-; CHECK-NEXT: [[GEP2_OFFS:%.*]] = add nsw i64 [[GEP2_IDX]], [[I:%.*]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[FOO:%.*]], i64 [[GEP2_OFFS]]
-; CHECK-NEXT: [[GEPDIFF:%.*]] = sub nsw i64 4200, [[GEP2_OFFS]]
+; CHECK-NEXT: [[GEP2_SPLIT_IDX:%.*]] = mul nsw i64 [[J:%.*]], 100
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[GEP2_SPLIT_IDX]], [[I:%.*]]
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[FOO:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[GEPDIFF:%.*]] = sub nsw i64 4200, [[TMP1]]
; CHECK-NEXT: store ptr [[GEP2]], ptr @dummy_global2, align 8
; CHECK-NEXT: ret i64 [[GEPDIFF]]
;
diff --git a/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
index b882d7d..4c7a28b 100644
--- a/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vector_gep1-inseltpoison.ll
@@ -55,7 +55,7 @@ define <2 x i1> @test5(<2 x ptr> %a) {
define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>, i32 0
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
; CHECK-NEXT: ret <2 x ptr> [[W]]
;
%w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/vector_gep1.ll b/llvm/test/Transforms/InstCombine/vector_gep1.ll
index 92de122..88af1c4 100644
--- a/llvm/test/Transforms/InstCombine/vector_gep1.ll
+++ b/llvm/test/Transforms/InstCombine/vector_gep1.ll
@@ -55,7 +55,7 @@ define <2 x i1> @test5(<2 x ptr> %a) {
define <2 x ptr> @test7(<2 x ptr> %a) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>, i32 0
+; CHECK-NEXT: [[W:%.*]] = getelementptr { i32, i32 }, <2 x ptr> [[A:%.*]], <2 x i64> <i64 5, i64 9>
; CHECK-NEXT: ret <2 x ptr> [[W]]
;
%w = getelementptr {i32, i32}, <2 x ptr> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
index 4aed1c5..2645500 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll
+++ b/llvm/test/Transforms/InstCombine/vectorgep-crash.ll
@@ -10,10 +10,14 @@ target triple = "x86_64-unknown-linux-gnu"
%Partials = type { [2 x double] }
%Partials.73 = type { [2 x %Dual.72] }
-; Function Attrs: sspreq
-define <8 x ptr> @"julia_axpy!_65480"(ptr %arg1, <8 x i64> %arg2) {
+define <8 x ptr> @test_vector_gep(ptr %arg1, <8 x i64> %arg2) {
+; CHECK-LABEL: define <8 x ptr> @test_vector_gep(
+; CHECK-SAME: ptr [[ARG1:%.*]], <8 x i64> [[ARG2:%.*]]) {
+; CHECK-NEXT: [[TOP:.*:]]
+; CHECK-NEXT: [[VECTORGEP14:%.*]] = getelementptr inbounds [[DUAL:%.*]], ptr [[ARG1]], <8 x i64> [[ARG2]], i32 1, i32 0, i64 0, i32 1
+; CHECK-NEXT: ret <8 x ptr> [[VECTORGEP14]]
+;
top:
-; CHECK: %VectorGep14 = getelementptr inbounds %Dual, ptr %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
%VectorGep14 = getelementptr inbounds %Dual, ptr %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
%0 = bitcast <8 x ptr> %VectorGep14 to <8 x ptr>
ret <8 x ptr> %0
diff --git a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
index 65aaf72..cd35401 100644
--- a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
@@ -33,11 +33,10 @@ define void @scev_expand_ptrtoint(i8 %x, ptr %start) {
; CHECK: [[LOOP_3_PREHEADER]]:
; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_2_HEADER]] ], [ [[INDVAR]], %[[LOOP_2_HEADER]] ]
; CHECK-NEXT: [[PTR_IV_2_LCSSA:%.*]] = phi ptr [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ], [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[START1]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 1, [[START1]]
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDVAR_LCSSA]], [[TMP4]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr [[SCEVGEP]])
diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
index 8baded8..38d559f 100644
--- a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
+++ b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
@@ -485,12 +485,206 @@ exit: ; preds = %vector.body
!0 = !{!0, !1}
!1 = !{!"llvm.loop.isvectorized", i32 1}
+; On Cortex-A55 we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; APPLE-LABEL: define void @scalar_epilogue(
+; APPLE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; APPLE-NEXT: [[ENTRY:.*]]:
+; APPLE-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; APPLE-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; APPLE: [[VECTOR_PH]]:
+; APPLE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; APPLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; APPLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; APPLE-NEXT: br label %[[VECTOR_BODY:.*]]
+; APPLE: [[VECTOR_BODY]]:
+; APPLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; APPLE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; APPLE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; APPLE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; APPLE-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; APPLE-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; APPLE-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; APPLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; APPLE-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; APPLE: [[MIDDLE_BLOCK]]:
+; APPLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; APPLE: [[SCALAR_REMAINDER_PREHEADER]]:
+; APPLE-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; APPLE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; APPLE: [[SCALAR_REMAINDER]]:
+; APPLE-NEXT: [[I_06:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; APPLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; APPLE-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[ADD:%.*]] = add i8 [[TMP8]], [[SPLAT_SCALAR]]
+; APPLE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[INC]] = add nuw i64 [[I_06]], 1
+; APPLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; APPLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP5:![0-9]+]]
+; APPLE: [[EXIT_LOOPEXIT]]:
+; APPLE-NEXT: br label %[[EXIT]]
+; APPLE: [[EXIT]]:
+; APPLE-NEXT: ret void
+;
+; CORTEXA55-LABEL: define void @scalar_epilogue(
+; CORTEXA55-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CORTEXA55-NEXT: [[ENTRY:.*]]:
+; CORTEXA55-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; CORTEXA55-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CORTEXA55: [[VECTOR_PH]]:
+; CORTEXA55-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CORTEXA55-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CORTEXA55-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CORTEXA55-NEXT: br label %[[VECTOR_BODY:.*]]
+; CORTEXA55: [[VECTOR_BODY]]:
+; CORTEXA55-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CORTEXA55-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; CORTEXA55-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; CORTEXA55-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CORTEXA55-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CORTEXA55: [[MIDDLE_BLOCK]]:
+; CORTEXA55-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER]]:
+; CORTEXA55-NEXT: [[I_06_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CORTEXA55-NEXT: [[TMP8:%.*]] = sub i64 [[N]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP9:%.*]] = add i64 [[N]], -1
+; CORTEXA55-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP8]], 3
+; CORTEXA55-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; CORTEXA55-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL:%.*]] = add i8 [[TMP11]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL]], ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[INC_PROL:%.*]] = add nuw i64 [[I_06_PH]], 1
+; CORTEXA55-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_1]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL]]
+; CORTEXA55-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[TMP12]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[I_06_PH]], 2
+; CORTEXA55-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_2]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; CORTEXA55-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[TMP13]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[I_06_PH]], 3
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC_PROL]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ]
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[I_06_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; CORTEXA55-NEXT: [[TMP14:%.*]] = icmp ult i64 [[TMP10]], 3
+; CORTEXA55-NEXT: br i1 [[TMP14]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER]]:
+; CORTEXA55-NEXT: [[I_06:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_3:%.*]], %[[SCALAR_REMAINDER]] ]
+; CORTEXA55-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; CORTEXA55-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[ADD:%.*]] = add i8 [[TMP15]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[INC:%.*]] = add nuw i64 [[I_06]], 1
+; CORTEXA55-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; CORTEXA55-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[ADD_1:%.*]] = add i8 [[TMP16]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[INC_1:%.*]] = add nuw i64 [[I_06]], 2
+; CORTEXA55-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; CORTEXA55-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[ADD_2:%.*]] = add i8 [[TMP17]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[INC_2:%.*]] = add nuw i64 [[I_06]], 3
+; CORTEXA55-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; CORTEXA55-NEXT: [[TMP18:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[ADD_3:%.*]] = add i8 [[TMP18]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[INC_3]] = add nuw i64 [[I_06]], 4
+; CORTEXA55-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i64 [[INC_3]], [[N]]
+; CORTEXA55-NEXT: br i1 [[EXITCOND_NOT_3]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CORTEXA55: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: br label %[[EXIT_LOOPEXIT]]
+; CORTEXA55: [[EXIT_LOOPEXIT]]:
+; CORTEXA55-NEXT: br label %[[EXIT]]
+; CORTEXA55: [[EXIT]]:
+; CORTEXA55-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
;.
; APPLE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; APPLE: [[META1]] = !{!"llvm.loop.unroll.disable"}
; APPLE: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]]}
; APPLE: [[META3]] = !{!"llvm.loop.isvectorized", i32 1}
+; APPLE: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]]}
+; APPLE: [[LOOP5]] = distinct !{[[LOOP5]], [[META3]]}
;.
; CORTEXA55: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; CORTEXA55: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CORTEXA55: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CORTEXA55: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
new file mode 100644
index 0000000..811d055
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
@@ -0,0 +1,603 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-unroll -mtriple riscv64 -mattr=+v,+f -S %s | FileCheck %s --check-prefixes=COMMON,CHECK
+; RUN: opt -p loop-unroll -mtriple=riscv64 -mcpu=sifive-s76 -S %s | FileCheck %s --check-prefixes=COMMON,SIFIVE
+
+define void @reverse(ptr %dst, ptr %src, i64 %len) {
+; CHECK-LABEL: define void @reverse(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[LEN]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @reverse(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[TMP2:%.*]] = add i64 [[LEN]], -1
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 7
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]]
+; SIFIVE: [[ENTRY_NEW]]:
+; SIFIVE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]]
+; SIFIVE-NEXT: br label %[[FOR_BODY:.*]]
+; SIFIVE: [[FOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; SIFIVE-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
+; SIFIVE-NEXT: [[TMP4:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT]]
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP4]]
+; SIFIVE-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT]]
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[ARRAYIDX2_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
+; SIFIVE-NEXT: [[TMP6:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP6]]
+; SIFIVE-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP7]], ptr [[ARRAYIDX2_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
+; SIFIVE-NEXT: [[TMP8:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP8]]
+; SIFIVE-NEXT: [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP9]], ptr [[ARRAYIDX2_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP10]]
+; SIFIVE-NEXT: [[TMP11:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[ARRAYIDX2_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5
+; SIFIVE-NEXT: [[TMP12:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP12]]
+; SIFIVE-NEXT: [[TMP13:%.*]] = load <4 x float>, ptr [[ARRAYIDX_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP13]], ptr [[ARRAYIDX2_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6
+; SIFIVE-NEXT: [[TMP14:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP14]]
+; SIFIVE-NEXT: [[TMP15:%.*]] = load <4 x float>, ptr [[ARRAYIDX_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP15]], ptr [[ARRAYIDX2_6]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7
+; SIFIVE-NEXT: [[TMP16:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_6]]
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP16]]
+; SIFIVE-NEXT: [[TMP17:%.*]] = load <4 x float>, ptr [[ARRAYIDX_7]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_6]]
+; SIFIVE-NEXT: store <4 x float> [[TMP17]], ptr [[ARRAYIDX2_7]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8
+; SIFIVE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8
+; SIFIVE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]]
+; SIFIVE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]]
+; SIFIVE: [[EXIT_UNR_LCSSA_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: br label %[[EXIT_UNR_LCSSA]]
+; SIFIVE: [[EXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[FOR_BODY_EPIL:.*]]
+; SIFIVE: [[FOR_BODY_EPIL]]:
+; SIFIVE-NEXT: [[TMP18:%.*]] = sub nsw i64 [[LEN]], [[IV_UNR]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP18]]
+; SIFIVE-NEXT: [[TMP19:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_UNR]]
+; SIFIVE-NEXT: store <4 x float> [[TMP19]], ptr [[ARRAYIDX2_EPIL]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[IV_UNR]], 1
+; SIFIVE-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[FOR_BODY_EPIL_1:.*]], label %[[EXIT_EPILOG_LCSSA:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_1]]:
+; SIFIVE-NEXT: [[TMP20:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP20]]
+; SIFIVE-NEXT: [[TMP21:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: store <4 x float> [[TMP21]], ptr [[ARRAYIDX2_EPIL_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[IV_UNR]], 2
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_1]], label %[[FOR_BODY_EPIL_2:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_2]]:
+; SIFIVE-NEXT: [[TMP22:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP22]]
+; SIFIVE-NEXT: [[TMP23:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP23]], ptr [[ARRAYIDX2_EPIL_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_2:%.*]] = add nuw nsw i64 [[IV_UNR]], 3
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_2]], label %[[FOR_BODY_EPIL_3:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_3]]:
+; SIFIVE-NEXT: [[TMP24:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP24]]
+; SIFIVE-NEXT: [[TMP25:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP25]], ptr [[ARRAYIDX2_EPIL_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_3:%.*]] = add nuw nsw i64 [[IV_UNR]], 4
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_3]], label %[[FOR_BODY_EPIL_4:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_4]]:
+; SIFIVE-NEXT: [[TMP26:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP26]]
+; SIFIVE-NEXT: [[TMP27:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP27]], ptr [[ARRAYIDX2_EPIL_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_4:%.*]] = add nuw nsw i64 [[IV_UNR]], 5
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_4]], label %[[FOR_BODY_EPIL_5:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_5]]:
+; SIFIVE-NEXT: [[TMP28:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP28]]
+; SIFIVE-NEXT: [[TMP29:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP29]], ptr [[ARRAYIDX2_EPIL_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_5:%.*]] = add nuw nsw i64 [[IV_UNR]], 6
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_5]], label %[[FOR_BODY_EPIL_6:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_6]]:
+; SIFIVE-NEXT: [[TMP30:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP30]]
+; SIFIVE-NEXT: [[TMP31:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP31]], ptr [[ARRAYIDX2_EPIL_6]], align 16
+; SIFIVE-NEXT: br label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[EXIT_EPILOG_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %1 = sub nsw i64 %len, %iv
+ %arrayidx = getelementptr inbounds <4 x float>, ptr %src, i64 %1
+ %2 = load <4 x float>, ptr %arrayidx, align 16
+ %arrayidx2 = getelementptr inbounds nuw <4 x float>, ptr %dst, i64 %iv
+ store <4 x float> %2, ptr %arrayidx2, align 16
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %len
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:                                             ; preds = %for.body
+ ret void
+}
+
+
+define void @saxpy_tripcount8_full_unroll(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount8_full_unroll(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMMON-NEXT: [[ENTRY:.*:]]
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[SRC]], align 4
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP0]], ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; COMMON-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; COMMON-NEXT: store <4 x float> [[TMP3]], ptr [[TMP2]], align 4
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 8
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
+define void @saxpy_tripcount1K_av0(ptr %dst, ptr %src, float %a) {
+; CHECK-LABEL: define void @saxpy_tripcount1K_av0(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @saxpy_tripcount1K_av0(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; SIFIVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; SIFIVE-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT1:%.*]] = add nuw nsw i64 [[INDEX]], 4
+; SIFIVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP12]], align 4
+; SIFIVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_1:%.*]] = add nuw nsw i64 [[INDEX]], 8
+; SIFIVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
+; SIFIVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_2:%.*]] = load <4 x float>, ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_2]], <4 x float> [[WIDE_LOAD12_2]])
+; SIFIVE-NEXT: store <4 x float> [[TMP8]], ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_2:%.*]] = add nuw nsw i64 [[INDEX]], 12
+; SIFIVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x float>, ptr [[TMP9]], align 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_3:%.*]] = load <4 x float>, ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_3]], <4 x float> [[WIDE_LOAD12_3]])
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT]] = add nuw nsw i64 [[INDEX]], 16
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
+define void @saxpy_tripcount1K_av1(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount1K_av1(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[ENTRY:.*]]:
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMMON-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; COMMON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; COMMON-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; COMMON: [[EXIT]]:
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body, !llvm.loop !0
+
+exit: ; preds = %vector.body
+ ret void
+}
+!0 = !{!0, !1}
+!1 = !{!"llvm.loop.isvectorized", i32 1}
+
+; On SiFive we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; CHECK-LABEL: define void @scalar_epilogue(
+; CHECK-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CHECK: [[SCALAR_REMAINDER_PREHEADER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CHECK: [[SCALAR_REMAINDER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; CHECK-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[INC]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @scalar_epilogue(
+; SIFIVE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; SIFIVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; SIFIVE: [[VECTOR_PH]]:
+; SIFIVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; SIFIVE-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; SIFIVE-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; SIFIVE: [[MIDDLE_BLOCK]]:
+; SIFIVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = add i64 [[N]], -1
+; SIFIVE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL]]:
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[INC:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; SIFIVE-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_1]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_1:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[SCALAR_LOAD_PROL_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 2
+; SIFIVE-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_2]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_2:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[SCALAR_LOAD_PROL_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 3
+; SIFIVE-NEXT: [[PROL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_2]], label %[[SCALAR_REMAINDER_PROL_3:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_3]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_3:%.*]] = load i8, ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_3:%.*]] = add i8 [[SCALAR_LOAD_PROL_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_3]], ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[INC_PROL_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 4
+; SIFIVE-NEXT: [[PROL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_3]], label %[[SCALAR_REMAINDER_PROL_4:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_4]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_4:%.*]] = load i8, ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_4:%.*]] = add i8 [[SCALAR_LOAD_PROL_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_4]], ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[INC_PROL_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 5
+; SIFIVE-NEXT: [[PROL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_4]], label %[[SCALAR_REMAINDER_PROL_5:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_5]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_5:%.*]] = load i8, ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_5:%.*]] = add i8 [[SCALAR_LOAD_PROL_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_5]], ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[INC_PROL_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 6
+; SIFIVE-NEXT: [[PROL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_5]], label %[[SCALAR_REMAINDER_PROL_6:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_6]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_6:%.*]] = load i8, ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_6:%.*]] = add i8 [[SCALAR_LOAD_PROL_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_6]], ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[INC_PROL_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 7
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ], [ [[INC_PROL_3]], %[[SCALAR_REMAINDER_PROL_3]] ], [ [[INC_PROL_4]], %[[SCALAR_REMAINDER_PROL_4]] ], [ [[INC_PROL_5]], %[[SCALAR_REMAINDER_PROL_5]] ], [ [[INC_PROL_6]], %[[SCALAR_REMAINDER_PROL_6]] ]
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[IV_SCALAR_LOOP]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; SIFIVE: [[SCALAR_REMAINDER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP1:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_7:%.*]], %[[SCALAR_REMAINDER]] ]
+; SIFIVE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[ADD1:%.*]] = add i8 [[SCALAR_LOAD1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD1]], ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[INC1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 1
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_1:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[ADD_1:%.*]] = add i8 [[SCALAR_LOAD_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[INC_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 2
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_2:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[ADD_2:%.*]] = add i8 [[SCALAR_LOAD_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[INC_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 3
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_3:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[ADD_3:%.*]] = add i8 [[SCALAR_LOAD_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[INC_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 4
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_4:%.*]] = load i8, ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[ADD_4:%.*]] = add i8 [[SCALAR_LOAD_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_4]], ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[INC_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 5
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_5:%.*]] = load i8, ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[ADD_5:%.*]] = add i8 [[SCALAR_LOAD_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_5]], ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[INC_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 6
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_6:%.*]] = load i8, ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[ADD_6:%.*]] = add i8 [[SCALAR_LOAD_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_6]], ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[INC_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 7
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_6]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_7:%.*]] = load i8, ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[ADD_7:%.*]] = add i8 [[SCALAR_LOAD_7]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_7]], ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[INC_7]] = add nuw i64 [[IV_SCALAR_LOOP1]], 8
+; SIFIVE-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INC_7]], [[N]]
+; SIFIVE-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; SIFIVE: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT_LOOPEXIT]]
+; SIFIVE: [[EXIT_LOOPEXIT]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
+; SIFIVE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; SIFIVE: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; SIFIVE: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; SIFIVE: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
index 011b823..a7ec749 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
@@ -27,7 +27,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> poison)
; CHECK-NEXT: [[TMP12:%.*]] = select fast <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP12]])
-; CHECK-NEXT: [[TMP14]] = fadd fast float [[TMP13]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP14]] = fadd fast float [[VEC_PHI]], [[TMP13]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
index 3d81541..e555785 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
@@ -33,9 +33,9 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP17]] = and i64 [[TMP16]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP17]] = and i64 [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD3]])
-; CHECK-NEXT: [[TMP19]] = and i64 [[TMP18]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP19]] = and i64 [[VEC_PHI2]], [[TMP18]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -59,7 +59,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX7]]
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x i64>, ptr [[TMP24]], align 8
; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP27]] = and i64 [[TMP26]], [[VEC_PHI8]]
+; CHECK-NEXT: [[TMP27]] = and i64 [[VEC_PHI8]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], 2
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC5]]
; CHECK-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index b349c55..47ce05d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -267,7 +267,7 @@ define i32 @test_struct_load6(ptr %S) #1 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP5]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
@@ -366,7 +366,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP8:%.*]] = sub nsw i64 2, [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP8]]
@@ -378,7 +378,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP11]])
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 4 x i32> [[REVERSE]], [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <vscale x 4 x i32> [[REVERSE1]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP18:%.*]] = sub nsw i64 2, [[TMP15]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i64 [[TMP18]]
@@ -587,7 +587,7 @@ define void @load_gap_reverse(ptr noalias nocapture readonly %P1, ptr noalias no
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT1]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 8, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], [[VEC_IND]]
@@ -729,7 +729,7 @@ define void @int_float_struct(ptr nocapture readonly %p) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> zeroinitializer, float undef, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 undef, i32 0), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
@@ -814,7 +814,7 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x ptr> [[TMP12]], i64 0
@@ -885,7 +885,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
@@ -963,7 +963,7 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
@@ -1036,7 +1036,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[TMP12]], i32 1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
@@ -1572,7 +1572,7 @@ define void @interleave_deinterleave_reverse(ptr noalias nocapture readonly %A,
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_XYZT:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_XYZT:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP9:%.*]] = sub nsw i64 4, [[TMP6]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 [[TMP9]]
@@ -1590,7 +1590,7 @@ define void @interleave_deinterleave_reverse(ptr noalias nocapture readonly %A,
; CHECK-NEXT: [[TMP18:%.*]] = sub nsw <vscale x 4 x i32> [[REVERSE3]], [[VEC_IND]]
; CHECK-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 4 x i32> [[REVERSE4]], [[VEC_IND]]
; CHECK-NEXT: [[TMP20:%.*]] = shl nuw nsw <vscale x 4 x i32> [[REVERSE5]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_XYZT]], ptr [[B:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_XYZT]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP22:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP25:%.*]] = sub nsw i64 4, [[TMP22]]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP25]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index fc86e3a..f4982e6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -88,7 +88,7 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP13]])
-; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[VEC_PHI]], [[TMP14]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP17]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
@@ -349,7 +349,7 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP17]])
-; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[TMP18]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[VEC_PHI]], [[TMP18]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP21]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
index 3e9f6fa..352f4fe 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vscale-based-trip-counts.ll
@@ -10,60 +10,32 @@ define void @vscale_mul_4(ptr noalias noundef readonly captures(none) %a, ptr no
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP10]], 8
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP18]], 4
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP11]]
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP26]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP16]]
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP27]], align 4
-; CHECK-NEXT: [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD3]]
-; CHECK-NEXT: [[TMP28:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD4]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP21]]
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP12]], align 4
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP28]], ptr [[TMP22]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[B]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP10]], ptr [[B]], align 4
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
-; CHECK-NEXT: [[MUL4:%.*]] = fmul float [[TMP24]], [[TMP25]]
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[MUL4:%.*]] = fmul float [[TMP12]], [[TMP13]]
; CHECK-NEXT: store float [[MUL4]], ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP1]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
;
entry:
%0 = tail call i64 @llvm.vscale.i64()
@@ -131,7 +103,7 @@ define void @vscale_mul_8(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
%0 = tail call i64 @llvm.vscale.i64()
@@ -161,41 +133,28 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i64 [[TMP0]], 12
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[MUL1]], [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[MUL1]], [[TMP4]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[MUL1]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[TMP7]], i64 [[TMP10]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 4
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP15]]
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP16]], align 4
-; CHECK-NEXT: [[TMP18:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD3]]
-; CHECK-NEXT: [[TMP25:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD4]]
-; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP20:%.*]] = mul nuw i64 [[TMP19]], 4
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw float, ptr [[TMP12]], i64 [[TMP20]]
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP12]], align 4
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP25]], ptr [[TMP21]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
@@ -207,14 +166,14 @@ define void @vscale_mul_12(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[TMP13]], [[TMP14]]
; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
;
entry:
%0 = tail call i64 @llvm.vscale.i64()
@@ -278,7 +237,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
@@ -297,7 +256,7 @@ define void @vscale_mul_31(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
;
entry:
%0 = tail call i64 @llvm.vscale.i64()
@@ -361,7 +320,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP21]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[MUL1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
@@ -380,7 +339,7 @@ define void @vscale_mul_64(ptr noalias noundef readonly captures(none) %a, ptr n
; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[MUL1]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
;
entry:
%0 = tail call i64 @llvm.vscale.i64()
@@ -403,14 +362,178 @@ for.body:
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
+; The loop's trip count is unknown at compile time if its calculation relies on
+; overflow.
+define void @trip_count_with_overflow(ptr noalias noundef readonly captures(none) %a, ptr noalias noundef captures(none) %b) #1 {
+; CHECK-LABEL: define void @trip_count_with_overflow(
+; CHECK-SAME: ptr noalias noundef readonly captures(none) [[A:%.*]], ptr noalias noundef captures(none) [[B:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP11]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP16]]
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD3]]
+; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT: ret void
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[MUL4:%.*]] = fmul float [[TMP24]], [[TMP25]]
+; CHECK-NEXT: store float [[MUL4]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP1]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+;
+entry:
+ %0 = tail call i64 @llvm.vscale.i64()
+ %1 = shl i64 %0, 2
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %indvars.iv
+ %2 = load float, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds nuw float, ptr %b, i64 %indvars.iv
+ %3 = load float, ptr %arrayidx3, align 4
+ %mul4 = fmul float %2, %3
+ store float %mul4, ptr %arrayidx3, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %1
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+; The known component of ElementCount is a 32-bit value.
+define void @trip_count_too_big_for_element_count(ptr noalias noundef readonly captures(none) %a, ptr noalias noundef captures(none) %b) #0 {
+; CHECK-LABEL: define void @trip_count_too_big_for_element_count(
+; CHECK-SAME: ptr noalias noundef readonly captures(none) [[A:%.*]], ptr noalias noundef captures(none) [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 32
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP11]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP16]]
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD3]]
+; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]]
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4
+; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT: ret void
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[MUL4:%.*]] = fmul float [[TMP24]], [[TMP25]]
+; CHECK-NEXT: store float [[MUL4]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP1]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+;
+entry:
+ %0 = tail call i64 @llvm.vscale.i64()
+ %1 = shl nsw nuw i64 %0, 32
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %indvars.iv
+ %2 = load float, ptr %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds nuw float, ptr %b, i64 %indvars.iv
+ %3 = load float, ptr %arrayidx3, align 4
+ %mul4 = fmul float %2, %3
+ store float %mul4, ptr %arrayidx3, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %1
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
declare i64 @llvm.vscale.i64()
attributes #0 = { vscale_range(1,16) "target-features"="+sve" }
+attributes #1 = { "target-features"="+sve" }
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
@@ -418,4 +541,7 @@ attributes #0 = { vscale_range(1,16) "target-features"="+sve" }
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
index bc02595..9f2c70e 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
@@ -17,7 +17,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -66,11 +66,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[VEC_IND]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD1]])
-; CHECK-NEXT: [[TMP8]] = add i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP8]] = add i32 [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index f1bee3b..83cb325 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -29,7 +29,7 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP6]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP7]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -109,7 +109,7 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP7]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -184,7 +184,7 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
index e27b028..27e87bc 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
@@ -23,10 +23,10 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[WIDE_LOAD2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,10 +96,10 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP14:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[TMP14]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index ddd334d..658b9a4 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -62,7 +62,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -127,7 +127,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -192,7 +192,7 @@ define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -254,7 +254,7 @@ define i32 @add_i32_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -300,7 +300,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP1]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -347,7 +347,7 @@ define i32 @add_i8_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP1]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -392,7 +392,7 @@ define signext i16 @add_i16_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i16 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i16 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
@@ -438,7 +438,7 @@ define signext i16 @add_i8_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP1]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i16 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i16 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -483,7 +483,7 @@ define zeroext i8 @add_i8_i8(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i8 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i8 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
@@ -577,7 +577,7 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -652,7 +652,7 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -731,7 +731,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -807,7 +807,7 @@ define i32 @mla_i32_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -860,7 +860,7 @@ define i32 @mla_i16_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
@@ -915,7 +915,7 @@ define i32 @mla_i8_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -968,7 +968,7 @@ define signext i16 @mla_i16_i16(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i16> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> [[TMP2]], <8 x i16> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i16 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i16 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
@@ -1021,7 +1021,7 @@ define signext i16 @mla_i8_i16(ptr nocapture readonly %x, ptr nocapture readonly
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw <16 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP4]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i16 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i16 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -1074,7 +1074,7 @@ define zeroext i8 @mla_i8_i8(ptr nocapture readonly %x, ptr nocapture readonly %
; CHECK-NEXT: [[TMP2:%.*]] = mul <16 x i8> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i8 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i8 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
@@ -1127,7 +1127,7 @@ define i32 @red_mla_ext_s8_s16_s32(ptr noalias nocapture readonly %A, ptr noalia
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
@@ -1190,7 +1190,7 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
@@ -1275,7 +1275,7 @@ define i32 @red_mla_u8_s8_u32(ptr noalias nocapture readonly %A, ptr noalias noc
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
@@ -1339,9 +1339,9 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 {
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC1]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
@@ -1412,7 +1412,7 @@ define i32 @mla_i8_i32_multiuse(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP2]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1460,7 +1460,7 @@ define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
@@ -1528,10 +1528,10 @@ define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 no
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP4]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP10]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
@@ -1678,11 +1678,11 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[WIDE_LOAD]], splat (i32 8)
; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP5]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]])
-; CHECK-NEXT: [[TMP8]] = add i64 [[TMP7]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP8]] = add i64 [[VEC_PHI1]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP37:![0-9]+]]
@@ -1770,12 +1770,12 @@ define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = sext <8 x i16> [[STRIDED_VEC]] to <8 x i64>
; CHECK-NEXT: [[TMP8:%.*]] = mul nsw <8 x i64> [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP8]])
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = sext <8 x i16> [[STRIDED_VEC4]] to <8 x i64>
; CHECK-NEXT: [[TMP12:%.*]] = sext <8 x i16> [[STRIDED_VEC1]] to <8 x i64>
; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <8 x i64> [[TMP11]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP13]])
-; CHECK-NEXT: [[TMP16]] = add i64 [[TMP15]], [[TMP10]]
+; CHECK-NEXT: [[TMP16]] = add i64 [[TMP10]], [[TMP15]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index 21266e5..162440a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @fadd(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -152,54 +134,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @vfwmaccbf16.vv(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[VECTOR_BODY:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_BODY]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <4 x bfloat>, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = load <4 x bfloat>, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = load <4 x float>, ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP4:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP5:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD3]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x float> [[WIDE_MASKED_LOAD4]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store <4 x float> [[TMP6]], ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[I]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[MIDDLE_BLOCK]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; NO-ZVFBFMIN-PREDICATED: [[SCALAR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP1:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP1:%.*]] = getelementptr float, ptr [[C]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store float [[FMULADD]], ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I1]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -274,21 +208,3 @@ loop:
exit:
ret void
}
-;.
-; NO-ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; NO-ZVFBFMIN-PREDICATED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN-PREDICATED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN-PREDICATED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN-PREDICATED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; ZVFBFMIN: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; ZVFBFMIN: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 5d032e6..5f13089 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -23,10 +23,9 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP10]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -36,9 +35,9 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VEC_IND]], ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[TMP8]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index 53e43e1..effaf57 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFHMIN: [[EXIT]]:
; NO-ZVFHMIN-NEXT: ret void
;
-; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
-; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: ret void
-;
; ZVFHMIN-LABEL: define void @fadd(
; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
@@ -86,6 +68,23 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
+; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
+; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
+; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
+; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
+; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
+; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: ret void
entry:
br label %loop
loop:
@@ -102,9 +101,3 @@ loop:
exit:
ret void
}
-;.
-; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFHMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFHMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 5b2d308..6e2434a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -85,7 +85,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP7]], align 2
; INLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i32>
; INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP9]])
-; INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
+; INLOOP-NEXT: [[TMP11]] = add i32 [[VEC_PHI]], [[TMP10]]
; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
; INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -130,10 +130,9 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
@@ -141,8 +140,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[VP_OP:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP9]]
; IF-EVL-OUTLOOP-NEXT: [[TMP10]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP5]])
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP5]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
@@ -185,10 +184,9 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 8
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP5]], i32 8, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
@@ -196,8 +194,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vp.reduce.add.nxv8i32(i32 0, <vscale x 8 x i32> [[TMP14]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
-; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP5]], [[TMP6]]
+; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
@@ -352,10 +350,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -364,8 +361,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
@@ -402,10 +399,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -413,9 +409,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP13]], i32 [[VEC_PHI]])
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-INLOOP-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-INLOOP: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index c35a6ab..6f20376 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -110,47 +110,39 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; PREDICATED_DATA-WITH-EVL: vector.ph:
; PREDICATED_DATA-WITH-EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_DATA-WITH-EVL: vector.body:
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP8]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP9]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x ptr> align 1 [[TMP15]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP17]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = zext nneg <vscale x 16 x i32> [[TMP3]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP4]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP5]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = or disjoint <vscale x 16 x i32> [[TMP3]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP7]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP8]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = zext nneg <vscale x 16 x i32> [[TMP3]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP10]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP9]], <vscale x 16 x ptr> align 1 [[TMP11]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP9]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_DATA-WITH-EVL: middle.block:
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_DATA-WITH-EVL: scalar.ph:
@@ -327,63 +319,55 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; PREDICATED_DATA-WITH-EVL: vector.ph:
; PREDICATED_DATA-WITH-EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; PREDICATED_DATA-WITH-EVL: vector.body:
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 2)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 3)
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = or disjoint <vscale x 16 x i32> [[TMP3]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = or disjoint <vscale x 16 x i32> [[TMP3]], splat (i32 2)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = or disjoint <vscale x 16 x i32> [[TMP3]], splat (i32 3)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = zext nneg <vscale x 16 x i32> [[TMP3]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP7]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP8]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP9]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP10]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP5]] to <vscale x 16 x i64>
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP13]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP15]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP16]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP17]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP22:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP21]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP15]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP18:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP17]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP19:%.*]] = zext nneg <vscale x 16 x i32> [[TMP3]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP19]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP15]], <vscale x 16 x ptr> align 1 [[TMP20]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP21:%.*]] = zext nneg <vscale x 16 x i32> [[TMP4]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP21]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> align 1 [[TMP22]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP5]] to <vscale x 16 x i64>
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP23]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP19]], <vscale x 16 x ptr> align 1 [[TMP24]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP17]], <vscale x 16 x ptr> align 1 [[TMP24]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP20]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP27:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP27]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP21]], <vscale x 16 x ptr> align 1 [[TMP28]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP29:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP29]]
-; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP22]], <vscale x 16 x ptr> align 1 [[TMP30]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP18]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP27:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; PREDICATED_DATA-WITH-EVL: middle.block:
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
; PREDICATED_DATA-WITH-EVL: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index 0228811..01df436 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -30,15 +30,17 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP11]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 2 x i32> [[VEC_IND]], splat (i32 8)
+; CHECK-NEXT: [[TMP20:%.*]] = mul i32 1, [[TMP11]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP20]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT7]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 2 x i32> [[TMP19]], [[BROADCAST_SPLAT6]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp sge <vscale x 2 x i32> [[VEC_IND]], splat (i32 2)
; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 2 x i1> [[TMP13]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]]
@@ -46,9 +48,9 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TMP17:%.*]] = trunc <vscale x 2 x i32> [[TMP16]] to <vscale x 2 x i8>
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> [[TMP17]], <vscale x 2 x ptr> align 1 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP11]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT6]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT8]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT1:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index f6f8f6a..ed50796 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -23,19 +23,18 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[VP_OP_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
-; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
@@ -89,9 +88,8 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -99,8 +97,8 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -151,10 +149,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -164,8 +161,8 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP11]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP7]])
; CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP11]])
@@ -225,17 +222,16 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
-; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
-; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
@@ -283,17 +279,16 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: store i64 [[V]], ptr [[B:%.*]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -404,19 +399,18 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[VP_OP_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
-; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index f9395aa..38e7832 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -32,9 +32,8 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -43,9 +42,9 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -159,9 +158,8 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -170,9 +168,9 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -286,9 +284,8 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -297,9 +294,9 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -413,9 +410,8 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -424,9 +420,9 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -540,9 +536,8 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -551,9 +546,9 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -667,9 +662,8 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -678,9 +672,9 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -794,9 +788,8 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -805,9 +798,9 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -921,9 +914,8 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -932,9 +924,9 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1048,9 +1040,8 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1059,9 +1050,9 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1175,9 +1166,8 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1186,9 +1176,9 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1302,9 +1292,8 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1313,9 +1302,9 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1429,9 +1418,8 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1440,9 +1428,9 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1556,9 +1544,8 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 16
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1567,9 +1554,9 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1686,9 +1673,8 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1697,9 +1683,9 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1815,9 +1801,8 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1826,9 +1811,9 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -1944,9 +1929,8 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1955,9 +1939,9 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -2073,9 +2057,8 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2084,9 +2067,9 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -2255,9 +2238,8 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2266,9 +2248,9 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
+; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FINISH_LOOPEXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index 7d21c4d..f604745 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -37,9 +37,8 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -50,9 +49,9 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -187,9 +186,8 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -200,9 +198,9 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -337,9 +335,8 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -350,9 +347,9 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -487,9 +484,8 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -500,9 +496,9 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
; IF-EVL: [[SCALAR_PH]]:
@@ -633,9 +629,8 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -644,8 +639,8 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -763,9 +758,8 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
@@ -774,8 +768,8 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -893,9 +887,8 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -906,8 +899,8 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -1033,9 +1026,8 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1046,8 +1038,8 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -1173,9 +1165,8 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1184,8 +1175,8 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 4b718dd..ce2b790 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -32,9 +32,8 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -43,8 +42,8 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -161,9 +160,8 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META10:![0-9]+]]
@@ -172,8 +170,8 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META13:![0-9]+]], !noalias [[META10]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -290,9 +288,8 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META17:![0-9]+]]
@@ -301,8 +298,8 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -419,9 +416,8 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META24:![0-9]+]]
@@ -430,8 +426,8 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META24]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -548,9 +544,8 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META31:![0-9]+]]
@@ -559,8 +554,8 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META34:![0-9]+]], !noalias [[META31]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -677,9 +672,8 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -688,8 +682,8 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP13]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -806,9 +800,8 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -817,8 +810,8 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP13]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -935,9 +928,8 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -946,8 +938,8 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP13]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -1064,9 +1056,8 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1075,8 +1066,8 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP13]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -1193,9 +1184,8 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1204,8 +1194,8 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[TMP18]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP13]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -1316,10 +1306,9 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP10]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -1331,9 +1320,9 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index 5f53cb6..d02d53b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -34,10 +34,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV1]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -47,9 +46,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP20]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP22]], [[EVL_BASED_IV1]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP8]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N_VEC]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP22]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP20]])
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
@@ -87,10 +86,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -100,9 +98,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
-; IF-EVL-INLOOP-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
+; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-INLOOP: scalar.ph:
@@ -197,7 +195,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -254,25 +252,19 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; IF-EVL-OUTLOOP-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP13]]
-; IF-EVL-OUTLOOP-NEXT: [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP14]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP15:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]]
+; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
+; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-OUTLOOP-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 4 x i32> [[TMP12]], [[BROADCAST_SPLAT]]
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = icmp sle <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
@@ -282,9 +274,9 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[PREDPHI]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI1]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PREDPHI]])
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
@@ -326,10 +318,9 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -338,9 +329,9 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
-; IF-EVL-INLOOP-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
+; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-INLOOP: scalar.ph:
@@ -443,7 +434,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -518,11 +509,10 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP11]]
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP12]]
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP13]], i64 0
@@ -535,9 +525,9 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP18]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP20]], [[EVL_BASED_IV1]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP8]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N_VEC]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
@@ -580,11 +570,10 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP10]]
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[ADD:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -597,10 +586,10 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[ADD]] = add i32 [[TMP17]], [[RDX]]
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP19]], [[EVL_BASED_IV1]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP8]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-INLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N_VEC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
; IF-EVL-INLOOP: scalar.ph:
@@ -714,7 +703,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -774,35 +763,26 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP6]]
; IF-EVL-OUTLOOP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-OUTLOOP-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; IF-EVL-OUTLOOP-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-OUTLOOP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP10]], splat (i64 1)
-; IF-EVL-OUTLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
-; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], splat (i32 1)
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP10]], splat (i32 1)
; IF-EVL-OUTLOOP-NEXT: [[INDUCTION1:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP13]]
; IF-EVL-OUTLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND2:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT7:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
-; IF-EVL-OUTLOOP-NEXT: [[TMP15:%.*]] = mul i32 1, [[TMP14]]
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP15]], i64 0
+; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64
-; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP16]]
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
-; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = mul i32 1, [[TMP14]]
+; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
+; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = icmp ult <vscale x 4 x i32> [[TMP12]], [[BROADCAST_SPLAT4]]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[ARRAYIDX]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp sle <vscale x 4 x i32> [[VP_OP_LOAD]], [[VEC_IND2]]
@@ -812,10 +792,9 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP24]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
; IF-EVL-OUTLOOP-NEXT: [[TMP25:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[IV]]
-; IF-EVL-OUTLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]]
-; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT4]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT2]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP24]])
@@ -862,11 +841,10 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP10]]
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -878,9 +856,9 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP17]] = add i32 [[TMP16]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
@@ -1003,7 +981,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index f27f49e..ae047f5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -23,9 +23,8 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -37,8 +36,8 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -145,9 +144,8 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -159,8 +157,8 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -266,9 +264,8 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -280,8 +277,8 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
@@ -387,9 +384,8 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -401,8 +397,8 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index 6a6bca2d..987f946 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -30,11 +30,10 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 33, i32 [[TMP11]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP25]], %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP12]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -44,8 +43,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
@@ -167,12 +166,11 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR_INIT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 22, i32 [[TMP14]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP32]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP15]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
@@ -183,8 +181,8 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP15]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
@@ -325,13 +323,12 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR_INIT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 11, i32 [[TMP17]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP22:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP23:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP39]], %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP18]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
@@ -344,8 +341,8 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP18]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
+; IF-EVL-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
@@ -642,12 +639,11 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 33, i32 [[TMP10]]
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 2 x i64> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP11]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP7]]
@@ -659,10 +655,10 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
; IF-EVL: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 892293b..2aeb1d0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -25,10 +25,9 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP10]]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -42,9 +41,9 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> align 4 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], [[TMP8]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index e0594ad..3e23df7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -24,20 +24,19 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -77,7 +76,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -131,20 +130,20 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = mul i32 [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = mul i32 [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -181,9 +180,9 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = mul i32 [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = mul i32 [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -240,20 +239,19 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = or i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -293,7 +291,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = or i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = or i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -349,20 +347,19 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = and i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -402,7 +399,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = and i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = and i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -458,20 +455,19 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = xor i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -511,7 +507,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = xor i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = xor i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -564,23 +560,22 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -676,23 +671,22 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -788,23 +782,22 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -900,23 +893,22 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1015,20 +1007,19 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = fadd reassoc float [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1068,7 +1059,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -1122,20 +1113,20 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -1172,9 +1163,9 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -1228,24 +1219,23 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1342,24 +1332,23 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1457,19 +1446,19 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1565,19 +1554,19 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1675,23 +1664,22 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = fmul reassoc <vscale x 4 x float> [[VP_OP_LOAD]], [[VP_OP_LOAD1]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18]] = fadd reassoc float [[TMP17]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1736,7 +1724,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
; NO-VP-NEXT: [[TMP11:%.*]] = fmul reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP12:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP11]])
-; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[TMP12]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[VEC_PHI]], [[TMP12]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1796,20 +1784,19 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
@@ -1914,20 +1901,19 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 80b0714..1f7c518 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -26,10 +26,9 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP12]]
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP13]]
@@ -44,9 +43,9 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP26]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index c7808b1..be6ae1d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -42,10 +42,9 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: vector.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP10]], [[ENTRY]] ], [ [[TMP19:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -53,9 +52,9 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[TMP9]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP21]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; IF-EVL-OUTLOOP-NEXT: store i32 [[TMP23]], ptr [[ADDR]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
@@ -100,10 +99,9 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
; IF-EVL-INLOOP-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL-INLOOP: vector.body:
-; IF-EVL-INLOOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP13]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META0:![0-9]+]]
@@ -111,9 +109,9 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-INLOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]]
-; IF-EVL-INLOOP-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP13]], [[TMP23]]
+; IF-EVL-INLOOP-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
; IF-EVL-INLOOP-NEXT: store i32 [[TMP22]], ptr [[ADDR]], align 4, !alias.scope [[META7:![0-9]+]], !noalias [[META0]]
; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
@@ -218,7 +216,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index bda9145..d474a03 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -22,18 +22,17 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 4
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP_LOAD]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i32 [[TMP12]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i32 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP11]], [[TMP12]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
; IF-EVL: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 2601bfd..06c6bfe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -26,9 +26,8 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[UMAX]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[UMAX]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -36,9 +35,9 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[UMAX]]
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
@@ -96,9 +95,8 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -106,9 +104,9 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
@@ -166,9 +164,8 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC_ADD]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC_ADD]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -176,8 +173,8 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP5]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC_ADD]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index 226372f..5f407fc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -22,9 +22,8 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -35,9 +34,9 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP19]], <vscale x 4 x i1> [[TMP17]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP21]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[EXIT:%.*]]
; IF-EVL: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 41bddaa..59d1370 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -24,19 +24,18 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP14]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index 3a963b0..2d5718b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -23,10 +23,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -34,8 +33,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
@@ -242,10 +241,9 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -253,8 +251,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP14]])
@@ -354,10 +352,9 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> splat (i32 -1), i32 [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -365,8 +362,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP14]])
@@ -466,10 +463,9 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -477,8 +473,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP14]])
@@ -579,10 +575,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -591,8 +586,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
@@ -698,10 +693,9 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -710,8 +704,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> [[TMP15]])
@@ -817,10 +811,9 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -829,8 +822,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
@@ -936,10 +929,9 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -948,8 +940,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP15]])
@@ -1054,10 +1046,9 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1065,8 +1056,8 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[VP_OP]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP14]])
@@ -1274,10 +1265,9 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1286,8 +1276,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP15]])
@@ -1393,10 +1383,9 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1405,8 +1394,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP15]])
@@ -1727,10 +1716,9 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <vscale x 4 x float> splat (float -0.000000e+00), float [[START:%.*]], i32 0
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1740,8 +1728,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP17]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP16]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP17]])
@@ -1848,10 +1836,9 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1859,9 +1846,9 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]]
@@ -1966,10 +1953,9 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1977,9 +1963,9 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 62eef6c..e2db28d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -22,9 +22,8 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -1
@@ -48,9 +47,9 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
+; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[LOOPEND:%.*]]
; IF-EVL: scalar.ph:
@@ -165,9 +164,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[OFFSET_IDX3:%.*]] = trunc i64 [[EVL_BASED_IV]] to i32
@@ -197,8 +195,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> [[VP_REVERSE_MASK6]], i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP28:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP28]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[LOOPEND:%.*]]
@@ -346,9 +344,8 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
@@ -382,9 +379,9 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_REVERSE2]], ptr align 1 [[TMP26]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP27]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
+; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[EXIT:%.*]]
; IF-EVL: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index a891eef..1c78b25 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -25,9 +25,8 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -36,8 +35,8 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
+; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[EXIT:%.*]]
@@ -357,9 +356,8 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -368,8 +366,8 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
+; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[EXIT:%.*]]
@@ -464,9 +462,8 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 3002, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 3002, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp ult i64 [[AVL]], 1024
; IF-EVL-NEXT: [[SAFE_AVL:%.*]] = select i1 [[TMP9]], i64 [[AVL]], i64 1024
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[SAFE_AVL]], i32 1, i1 true)
@@ -477,8 +474,8 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP5]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
-; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 3002
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[EXIT:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 8ab3817..687a2e7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -25,9 +25,8 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[SPEC_SELECT]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = sub nuw nsw i64 1, [[OFFSET_IDX]]
@@ -42,8 +41,8 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_REVERSE]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 79ec73a..24649729 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -165,15 +165,14 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> align 1 [[BROADCAST_SPLAT2]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index 4da31a0..dfdc893 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -21,16 +21,15 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> zeroinitializer, <vscale x 8 x ptr> align 1 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 9
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index bb2e099..6476373 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -32,9 +32,8 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 8 x ptr> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META0:![0-9]+]]
@@ -48,8 +47,8 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index d3c2ccf..568aa95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -98,9 +98,8 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -109,8 +108,8 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
-; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
@@ -410,16 +409,18 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]]
; TF-SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
; TF-SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IND]], splat (i64 1024)
+; TF-SCALABLE-NEXT: [[TMP16:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ult <vscale x 4 x i32> [[TMP16]], [[BROADCAST_SPLAT4]]
; TF-SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 4 x i64> [[VEC_IND]], splat (i64 10)
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> zeroinitializer
; TF-SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP10]], i32 [[TMP7]])
@@ -428,10 +429,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; TF-SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -569,9 +570,8 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -580,8 +580,8 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
-; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
@@ -709,17 +709,16 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
-; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
@@ -868,10 +867,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP13]]
@@ -882,9 +880,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
@@ -1046,10 +1044,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]]
@@ -1061,9 +1058,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
@@ -1202,17 +1199,16 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
-; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
-; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
-; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
new file mode 100644
index 0000000..7c1ec9a
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt %s -S -mtriple riscv64 -passes=loop-vectorize --prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data-with-evl -riscv-v-min-trip-count=0 -force-target-instruction-cost=1 -mattr=+v | FileCheck %s
+
+; Check that the canonical IV is removed in single-iteration loops.
+define void @foo(ptr %arg) #0 {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: ptr [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 3, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 3, i32 2, i1 true)
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> zeroinitializer, ptr align 8 [[ARG]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]]
+; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 3
+; CHECK-NEXT: br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep = getelementptr [3 x i64], ptr %arg, i64 0, i64 %iv
+ store i64 0, ptr %gep, align 8
+ %iv.next = add i64 %iv, 1
+ %cond = icmp eq i64 %iv.next, 3
+ br i1 %cond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+attributes #0 = { vscale_range(2,2) }
+
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 7537a43..85116fe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -22,9 +22,8 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -35,8 +34,8 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP20]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
index 7f1066c..c058789 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
@@ -20,7 +20,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -35,6 +35,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -76,7 +77,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -91,6 +92,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -132,7 +134,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -147,6 +149,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -188,7 +191,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -203,6 +206,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -244,7 +248,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -256,6 +260,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTLZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -295,7 +300,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -307,6 +312,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTTZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -346,7 +352,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -360,6 +366,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -401,7 +408,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -415,6 +422,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -456,7 +464,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -468,6 +476,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ABS]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
index c1b656a..8d3fe48 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
@@ -19,7 +19,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -31,6 +31,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -72,7 +73,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -84,6 +85,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ZEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -123,7 +125,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -135,6 +137,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -174,7 +177,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -186,6 +189,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -225,7 +229,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -237,6 +241,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -276,7 +281,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -288,6 +293,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -327,7 +333,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -339,6 +345,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[UITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -378,7 +385,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -390,6 +397,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOSI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -429,7 +437,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -441,6 +449,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOUI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -480,7 +489,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -492,6 +501,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[INTTOPTR]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -532,7 +542,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: EMIT vp<[[INDEX:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[INDEX_EVL:%.+]]> = phi ir<0>, vp<[[INDEX_EVL_NEXT:%.+]]>
; IF-EVL-NEXT: ir<[[IV:%.+]]> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[EVL]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<[[N]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[INDEX_EVL]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-GEP Inv[Var] ir<[[GEP:%.+]]> = getelementptr inbounds ir<%b>, ir<[[IV]]>
@@ -542,6 +552,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[VECTOR_PTR]]>, ir<[[PTRTOINT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[ZEXT:%.+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[INDEX_EVL_NEXT]]> = add vp<[[ZEXT]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[ZEXT]]>
; IF-EVL-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDEX]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
index 9900602..ab4bb90 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
@@ -26,8 +26,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<[[FOR_PHI:%.+]]> = phi ir<33>, ir<[[LD:%.+]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%TC>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[PREV_EVL:%.+]]> = phi [ vp<[[VF32]]>, vector.ph ], [ vp<[[EVL:%.+]]>, vector.body ]
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%TC>, vp<[[EVL_PHI]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds nuw ir<%A>, vp<[[ST]]>
@@ -40,6 +40,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
index 1c9554d..dff4971 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
@@ -40,7 +40,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-OUTLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-OUTLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_SELECT:%.+]]>
-; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-OUTLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-OUTLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -50,6 +50,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: WIDEN-INTRINSIC vp<[[RDX_SELECT]]> = call llvm.vp.merge(ir<true>, ir<[[ADD]]>, ir<[[RDX_PHI]]>, vp<[[EVL]]>)
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-OUTLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-OUTLOOP-NEXT: No successors
@@ -79,7 +80,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-INLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-INLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, ir<[[RDX_NEXT:%.+]]>
-; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-INLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-INLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -88,6 +89,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + vp.reduce.add (ir<[[LD1]]>, vp<[[EVL]]>)
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-INLOOP-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
index 42a846a..b3a611e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
@@ -24,7 +24,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -39,6 +39,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index 736f88e..e1d01ba 100644
--- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -87,10 +87,10 @@ for.end:
; CHECK: %[[I1:.+]] = or disjoint i64 %index, 1
; CHECK: %[[I2:.+]] = or disjoint i64 %index, 2
; CHECK: %[[I3:.+]] = or disjoint i64 %index, 3
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %index, i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]], i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]], i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I3]], i32 0
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %index
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]]
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]]
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I3]]
; CHECK: getelementptr inbounds %pair, ptr %p, i64 %index, i32 1
; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]], i32 1
; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]], i32 1
@@ -102,7 +102,7 @@ for.end:
; INTER: vector.body
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER-NOT: getelementptr
-; INTER: getelementptr inbounds %pair, ptr %p, i64 %index, i32 0
+; INTER: getelementptr inbounds %pair, ptr %p, i64 %index
; INTER-NOT: getelementptr
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
;
@@ -145,10 +145,10 @@ for.end:
; CHECK: %[[I1:.+]] = add i64 [[OFFSET_IDX]], -1
; CHECK: %[[I2:.+]] = add i64 [[OFFSET_IDX]], -2
; CHECK: %[[I3:.+]] = add i64 [[OFFSET_IDX]], -3
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 [[OFFSET_IDX]], i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]], i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]], i32 0
-; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I3]], i32 0
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 [[OFFSET_IDX]]
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]]
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]]
+; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I3]]
; CHECK: getelementptr inbounds %pair, ptr %p, i64 [[OFFSET_IDX]], i32 1
; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I1]], i32 1
; CHECK: getelementptr inbounds %pair, ptr %p, i64 %[[I2]], i32 1
@@ -161,7 +161,7 @@ for.end:
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER: [[OFFSET_IDX:%.+]] = sub i64 %n, %index
; INTER-NOT: getelementptr
-; INTER: %[[G0:.+]] = getelementptr inbounds %pair, ptr %p, i64 [[OFFSET_IDX]], i32 0
+; INTER: %[[G0:.+]] = getelementptr inbounds %pair, ptr %p, i64 [[OFFSET_IDX]]
; INTER: getelementptr inbounds i8, ptr %[[G0]], i64 -24
; INTER-NOT: getelementptr
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
@@ -200,14 +200,14 @@ for.end:
; INTER-NOT: LV: Found uniform instruction: %tmp0 = getelementptr inbounds %pair, ptr %p, i64 %i, i32 0
; INTER: vector.body
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, {{.*}} ]
-; INTER: %[[G0:.+]] = getelementptr inbounds %pair, ptr %p, i64 %index, i32 0
+; INTER: %[[G0:.+]] = getelementptr inbounds %pair, ptr %p, i64 %index
; INTER: %wide.vec = load <8 x i32>, ptr %[[G0]], align 8
-; INTER: %[[I1:.+]] = or disjoint i64 %index, 1
-; INTER: getelementptr inbounds %pair, ptr %p, i64 %[[I1]], i32 0
-; INTER: %[[I2:.+]] = or disjoint i64 %index, 2
-; INTER: getelementptr inbounds %pair, ptr %p, i64 %[[I2]], i32 0
-; INTER: %[[I3:.+]] = or disjoint i64 %index, 3
-; INTER: getelementptr inbounds %pair, ptr %p, i64 %[[I3]], i32 0
+; INTER: %[[G1:.+]] = getelementptr %pair, ptr %p, i64 %index
+; INTER: getelementptr i8, ptr %[[G1]], i64 8
+; INTER: %[[G2:.+]] = getelementptr %pair, ptr %p, i64 %index
+; INTER: getelementptr i8, ptr %[[G2]], i64 16
+; INTER: %[[G3:.+]] = getelementptr %pair, ptr %p, i64 %index
+; INTER: getelementptr i8, ptr %[[G3]], i64 24
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
;
define void @predicated_store(ptr %p, i32 %x, i64 %n) {
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
index fb4545c..348625c 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
@@ -120,7 +120,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
@@ -129,7 +129,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i64 0
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 0
; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP6]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -137,7 +137,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i64 1
; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[WIDE_VEC]], i64 2
; CHECK-NEXT: store i64 [[TMP10]], ptr [[TMP9]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
@@ -157,7 +157,7 @@ define void @interleaved_with_cond_store_1(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[TMP14]], [[X]]
@@ -224,8 +224,8 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
@@ -258,7 +258,7 @@ define void @interleaved_with_cond_store_2(ptr %p, i64 %x, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[IF_MERGE:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_0:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_1:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[P_1]], align 8
; CHECK-NEXT: store i64 [[X]], ptr [[P_0]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 0afd2e1..add58758 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -113,7 +113,7 @@ define void @test_struct_array_load3_store3() {
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], splat (i32 1)
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[STRIDED_VEC2]], splat (i32 2)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], splat (i32 3)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -192,7 +192,7 @@ define i32 @test_struct_load4(ptr nocapture readonly %S) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[S:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[S:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
@@ -268,7 +268,7 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[B:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 3)
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 4)
@@ -346,7 +346,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 1023, i32 1022, i32 1021, i32 1020>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 -24
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -355,7 +355,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[REVERSE2:%.*]] = shufflevector <4 x i32> [[STRIDED_VEC1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[REVERSE]], [[VEC_IND]]
; CHECK-NEXT: [[TMP3:%.*]] = sub nsw <4 x i32> [[REVERSE2]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 -24
; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[REVERSE4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -578,10 +578,10 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture %
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 1021, [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 1020, [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i64> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], i64 [[OFFSET_IDX]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P1]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], i64 [[OFFSET_IDX]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP0]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2]], i64 [[TMP1]], i32 1
@@ -822,7 +822,7 @@ define void @int_float_struct(ptr nocapture readonly %A) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ <i32 undef, i32 0, i32 0, i32 0>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[A:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -905,10 +905,10 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
@@ -936,7 +936,7 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_Y]], align 4
@@ -993,7 +993,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 1
@@ -1023,7 +1023,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP18:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP17]], ptr [[P_I_Y]], align 4
@@ -1082,10 +1082,10 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP2]], i32 1
@@ -1114,7 +1114,7 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_MINUS_1_X:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
@@ -1173,7 +1173,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], splat (i64 1)
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP2]], i64 0
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[TMP5]], i32 1
@@ -1210,7 +1210,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) {
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP22:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_PLUS_1:%.*]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
+; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: [[P_I_PLUS_1_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I_PLUS_1]], i32 1
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[P_I_X]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
index cd44c3d..ffe118b 100644
--- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
+++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
@@ -17,16 +17,42 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[BROADCAST_SPLAT]] to <4 x i32>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE8:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE8]] ]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ule <4 x i32> [[VEC_IND]], splat (i32 8)
; CHECK-NEXT: [[TMP5:%.*]] = icmp sge <4 x i32> [[VEC_IND]], splat (i32 2)
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = shl <4 x i32> [[PREDPHI]], splat (i32 8)
; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i32> [[TMP7]] to <4 x i8>
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK: [[PRED_STORE_IF]]:
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i8> [[TMP8]], i32 0
+; CHECK-NEXT: store i8 [[TMP17]], ptr [[P]], align 1
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; CHECK: [[PRED_STORE_CONTINUE]]:
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP4]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
+; CHECK: [[PRED_STORE_IF3]]:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i8> [[TMP8]], i32 1
+; CHECK-NEXT: store i8 [[TMP12]], ptr [[P]], align 1
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; CHECK: [[PRED_STORE_CONTINUE4]]:
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP4]], i32 2
+; CHECK-NEXT: br i1 [[TMP13]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
+; CHECK: [[PRED_STORE_IF5]]:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i8> [[TMP8]], i32 2
+; CHECK-NEXT: store i8 [[TMP14]], ptr [[P]], align 1
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; CHECK: [[PRED_STORE_CONTINUE6]]:
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP4]], i32 3
+; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8]]
+; CHECK: [[PRED_STORE_IF7]]:
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i8> [[TMP8]], i32 3
; CHECK-NEXT: store i8 [[TMP9]], ptr [[P]], align 1
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE8]]
+; CHECK: [[PRED_STORE_CONTINUE8]]:
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], 12
@@ -263,7 +289,6 @@ exit: ; preds = %loop.latch
}
; Test case for https://github.com/llvm/llvm-project/issues/149347.
-; FIXME: Currently mis-compiles.
define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(ptr %dst) {
; CHECK-LABEL: define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(
; CHECK-SAME: ptr [[DST:%.*]]) {
@@ -272,7 +297,26 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK: [[PRED_STORE_IF]]:
+; CHECK-NEXT: store i32 1, ptr [[DST]], align 4
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; CHECK: [[PRED_STORE_CONTINUE]]:
+; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
+; CHECK: [[PRED_STORE_IF1]]:
+; CHECK-NEXT: store i32 1, ptr [[DST]], align 4
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE2]]
+; CHECK: [[PRED_STORE_CONTINUE2]]:
+; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
+; CHECK: [[PRED_STORE_IF3]]:
+; CHECK-NEXT: store i32 1, ptr [[DST]], align 4
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; CHECK: [[PRED_STORE_CONTINUE4]]:
+; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
+; CHECK: [[PRED_STORE_IF5]]:
; CHECK-NEXT: store i32 0, ptr [[DST]], align 4
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; CHECK: [[PRED_STORE_CONTINUE6]]:
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index a85718d..4810952 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -289,7 +289,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP24:%.*]] = phi <4 x i32> [ [[TMP18]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP23]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP24]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP25]])
-; CHECK-NEXT: [[TMP27]] = and i32 [[TMP26]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP27]] = and i32 [[VEC_PHI]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -420,10 +420,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: [[TMP40:%.*]] = phi <4 x i32> [ [[TMP30]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP38]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index 795605d..755d7e2 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -55,7 +55,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -158,13 +158,13 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = add i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = add i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -256,10 +256,10 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> splat (i32 3), <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-NEXT: [[TMP29]] = add i32 [[TMP28]], [[TMP26]]
+; CHECK-NEXT: [[TMP29]] = add i32 [[TMP26]], [[TMP28]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -363,13 +363,13 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -478,11 +478,11 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP50:%.*]] = phi <4 x i32> [ [[TMP34]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP49]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP40:%.*]] = mul nsw <4 x i32> [[TMP50]], [[TMP39]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -590,10 +590,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -698,10 +698,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = and i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = and i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -807,7 +807,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = or i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = or i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -913,7 +913,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = xor i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = xor i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -1124,10 +1124,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x float> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP38]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP41:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP39]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP44:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
index 12d83eb..20b42c3 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
@@ -24,13 +24,13 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[VEC_PHI2]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[VEC_PHI3]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,21 +96,21 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i64>, ptr [[TMP6]], align 8
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i64>, ptr [[TMP7]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[VEC_PHI]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[VEC_PHI2]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[VEC_PHI3]], [[TMP14]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD7]])
-; CHECK-NEXT: [[TMP17]] = add i64 [[TMP16]], [[TMP9]]
+; CHECK-NEXT: [[TMP17]] = add i64 [[TMP9]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD8]])
-; CHECK-NEXT: [[TMP19]] = add i64 [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[TMP19]] = add i64 [[TMP11]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP21]] = add i64 [[TMP20]], [[TMP13]]
+; CHECK-NEXT: [[TMP21]] = add i64 [[TMP13]], [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD10]])
-; CHECK-NEXT: [[TMP23]] = add i64 [[TMP22]], [[TMP15]]
+; CHECK-NEXT: [[TMP23]] = add i64 [[TMP15]], [[TMP22]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -332,16 +332,16 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP98:%.*]] = phi <4 x i32> [ [[TMP92]], [[PRED_LOAD_CONTINUE34]] ], [ [[TMP97]], [[PRED_LOAD_IF35]] ]
; CHECK-NEXT: [[TMP99:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP26]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP100:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP99]])
-; CHECK-NEXT: [[TMP101]] = add i32 [[TMP100]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP101]] = add i32 [[VEC_PHI]], [[TMP100]]
; CHECK-NEXT: [[TMP102:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP50]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP103:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP102]])
-; CHECK-NEXT: [[TMP104]] = add i32 [[TMP103]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP104]] = add i32 [[VEC_PHI4]], [[TMP103]]
; CHECK-NEXT: [[TMP105:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP74]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP106:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP105]])
-; CHECK-NEXT: [[TMP107]] = add i32 [[TMP106]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP107]] = add i32 [[VEC_PHI5]], [[TMP106]]
; CHECK-NEXT: [[TMP108:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[TMP98]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP109:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP108]])
-; CHECK-NEXT: [[TMP110]] = add i32 [[TMP109]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP110]] = add i32 [[VEC_PHI6]], [[TMP109]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272
@@ -571,16 +571,16 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP106:%.*]] = phi <4 x i32> [ [[TMP100]], [[PRED_LOAD_CONTINUE36]] ], [ [[TMP105]], [[PRED_LOAD_IF37]] ]
; CHECK-NEXT: [[TMP107:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP34]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP108:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP107]])
-; CHECK-NEXT: [[TMP109]] = mul i32 [[TMP108]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP109]] = mul i32 [[VEC_PHI]], [[TMP108]]
; CHECK-NEXT: [[TMP110:%.*]] = select <4 x i1> [[TMP9]], <4 x i32> [[TMP58]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP111:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP110]])
-; CHECK-NEXT: [[TMP112]] = mul i32 [[TMP111]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP112]] = mul i32 [[VEC_PHI4]], [[TMP111]]
; CHECK-NEXT: [[TMP113:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP82]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP114:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP113]])
-; CHECK-NEXT: [[TMP115]] = mul i32 [[TMP114]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP115]] = mul i32 [[VEC_PHI5]], [[TMP114]]
; CHECK-NEXT: [[TMP116:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP106]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP116]])
-; CHECK-NEXT: [[TMP118]] = mul i32 [[TMP117]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP118]] = mul i32 [[VEC_PHI6]], [[TMP117]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index b302868..0529d84 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -15,7 +15,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -63,11 +63,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -118,7 +118,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[TMP3]] = add i32 [[TMP2]], 12
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -168,11 +168,11 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -226,10 +226,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B1:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP6]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -282,9 +282,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -335,7 +335,7 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -385,9 +385,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = and i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = and i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -438,7 +438,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = or i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = or i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -489,7 +489,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = xor i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = xor i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -589,9 +589,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -826,11 +826,11 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -984,7 +984,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = fadd float [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
@@ -1127,7 +1127,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-NEXT: [[TMP4:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[TMP4]], <4 x float> splat (float -0.000000e+00)
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = fadd float [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
@@ -1210,7 +1210,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
@@ -1294,10 +1294,10 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[VEC_PHI]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
index be0e0d1..5c52b1a 100644
--- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
@@ -18,7 +18,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
index 4612545..f8bda1c 100644
--- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
@@ -389,22 +389,18 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; CHECK-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
; CHECK-NEXT: br label %[[LOOP_1:.*]]
; CHECK: [[LOOP_1]]:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], %[[LOOP_1]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[PTR_IV_1_NEXT:%.*]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr i8, ptr [[PTR_IV_1]], i64 2
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[C]], label %[[LOOP_2_PH:.*]], label %[[LOOP_1]]
; CHECK: [[LOOP_2_PH]]:
-; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT_LCSSA:%.*]] = phi ptr [ [[PTR_IV_1_NEXT]], %[[LOOP_1]] ]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[SRC2]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDVAR_LCSSA]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -2, [[SRC2]]
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_NEXT_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
@@ -426,11 +422,11 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 40, %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_2_PH]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_2:.*]]
; CHECK: [[LOOP_2]]:
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP_2]] ]
-; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
+; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = add i64 [[IV_1]], 1
; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV_2_NEXT]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
index af2b238..efd9f8b 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
@@ -15,6 +15,13 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = and i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -54,6 +61,10 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -97,6 +108,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -351,6 +367,11 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
index 5457e36..81df2f1 100644
--- a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
+++ b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
@@ -19,4 +19,12 @@ GlobalValueMap:
15859245615183425489: # guid("internal")
- Linkage: 7 # internal
Live: true
+ 1062103744896965210: # guid("alias1")
+ - Linkage: 4 # weak
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
+ 2510616090736846890: # guid("alias2")
+ - Linkage: 0 # weak
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
...
diff --git a/llvm/test/Transforms/LowerTypeTests/export-alias.ll b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
index 45b4db6..25d3483 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-alias.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
@@ -1,21 +1,19 @@
; RUN: opt -S %s -passes=lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/exported-funcs.yaml | FileCheck %s
;
-; CHECK: @alias1 = weak alias [8 x i8], ptr @external_addrtaken
-; CHECK: @alias2 = hidden alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias1 = alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias2 = alias [8 x i8], ptr @external_addrtaken
; CHECK-NOT: @alias3 = alias
; CHECK-NOT: @not_present
target triple = "x86_64-unknown-linux"
-!cfi.functions = !{!0, !2, !3}
-!aliases = !{!4, !5, !6}
+!cfi.functions = !{!0, !2, !3, !4}
+!aliases = !{!5, !6}
!0 = !{!"external_addrtaken", i8 0, !1}
!1 = !{i64 0, !"typeid1"}
-!2 = !{!"alias1", i8 1, !1}
-; alias2 not included here, this could happen if the only reference to alias2
-; is in a module compiled without cfi-icall
-!3 = !{!"alias3", i8 1, !1}
-!4 = !{!"alias1", !"external_addrtaken", i8 0, i8 1}
-!5 = !{!"alias2", !"external_addrtaken", i8 1, i8 0}
-!6 = !{!"alias3", !"not_present", i8 0, i8 0}
+!2 = !{!"alias1", i8 0, !1}
+!3 = !{!"alias2", i8 0, !1}
+!4 = !{!"alias3", i8 0, !1}
+!5 = !{!"external_addrtaken", !"alias1", !"alias2"}
+!6 = !{!"not_present", !"alias3"}
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index a13c36f..b932a69 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -41,7 +41,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[SUM_0_LCSSA]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]]
; CHECK-NEXT: br label [[WHILE_END5]]
; CHECK: while.end5:
; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
index 19fbc1f..4455016 100644
--- a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
@@ -1,3 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -O3 -S < %s | FileCheck %s
+
;; This tests lowering of the implementations of table-based ctz
;; algorithm to the llvm.cttz instruction in the -O3 case.
@@ -13,13 +16,17 @@
;; }
;; Compiled as: clang -O3 test.c -S -emit-llvm -Xclang -disable-llvm-optzns
-; RUN: opt -O3 -S < %s | FileCheck %s
-
-; CHECK: call range(i32 0, 33) i32 @llvm.cttz.i32
-
@ctz1.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 16
-define i32 @ctz1(i32 noundef %x) {
+define i32 @ctz(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
entry:
%x.addr = alloca i32, align 4
store i32 %x, ptr %x.addr, align 4
@@ -35,3 +42,28 @@ entry:
%conv = sext i8 %2 to i32
ret i32 %conv
}
+
+define i32 @ctz_nonarraygep(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz_nonarraygep(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, ptr %x.addr, align 4
+ %0 = load i32, ptr %x.addr, align 4
+ %1 = load i32, ptr %x.addr, align 4
+ %sub = sub i32 0, %1
+ %and = and i32 %0, %sub
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz1.table, i64 %idxprom
+ %2 = load i8, ptr %arrayidx, align 1
+ %conv = sext i8 %2 to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/Transforms/SCCP/no-fold-fcmp-dynamic-denormal-mode-issue114947.ll b/llvm/test/Transforms/SCCP/no-fold-fcmp-dynamic-denormal-mode-issue114947.ll
index 1eb5417..285122d 100644
--- a/llvm/test/Transforms/SCCP/no-fold-fcmp-dynamic-denormal-mode-issue114947.ll
+++ b/llvm/test/Transforms/SCCP/no-fold-fcmp-dynamic-denormal-mode-issue114947.ll
@@ -106,10 +106,11 @@ define <vscale x 2 x i1> @fold_fcmp_nondenormal_double_ieee_dynamic_scalable_vec
ret <vscale x 2 x i1> %cmp
}
-define <vscale x 2 x i1> @no_fold_fcmp_denormal_double_ieee_dynamic_scalaable_vector_splat() #0 {
-; CHECK-LABEL: define <vscale x 2 x i1> @no_fold_fcmp_denormal_double_ieee_dynamic_scalaable_vector_splat(
+define <vscale x 2 x i1> @no_fold_fcmp_denormal_double_ieee_dynamic_scalable_vector_splat() #0 {
+; CHECK-LABEL: define <vscale x 2 x i1> @no_fold_fcmp_denormal_double_ieee_dynamic_scalable_vector_splat(
; CHECK-SAME: ) #[[ATTR0]] {
-; CHECK-NEXT: ret <vscale x 2 x i1> splat (i1 true)
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 2 x double> splat (double 0x8000000000000), zeroinitializer
+; CHECK-NEXT: ret <vscale x 2 x i1> [[CMP]]
;
%cmp = fcmp une <vscale x 2 x double> splat (double 0x8000000000000), zeroinitializer
ret <vscale x 2 x i1> %cmp
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
new file mode 100644
index 0000000..32b7719
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S < %s | FileCheck %s
+
+; Allow jump-threading when values defined in the block are live outside of the block
+; to those destinations in which the values are dead.
+
+define void @testA(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; %value is live outside of block mainB, but jump-threading
+; can still occur to destination mainC, since %value is dead there.
+; Subsequent CFG simplifications will create one if block.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; Use of %value is not in either immediate destination of mainB.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testA_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testB_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
new file mode 100644
index 0000000..6868693
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=3 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_3
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=4 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_4
+
+; Test option -max-jump-threading-live-blocks=<num>
+
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK_LIMIT_3-LABEL: define void @testB(
+; CHECK_LIMIT_3-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_3-NEXT: [[MAINA:.*]]:
+; CHECK_LIMIT_3-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK_LIMIT_3: [[IFA]]:
+; CHECK_LIMIT_3-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINB]]
+; CHECK_LIMIT_3: [[MAINB]]:
+; CHECK_LIMIT_3-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_3: [[IFB]]:
+; CHECK_LIMIT_3-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM1]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM2]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_3: [[IFB_JOIN]]:
+; CHECK_LIMIT_3-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_3-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_3: [[MAINC]]:
+; CHECK_LIMIT_3-NEXT: ret void
+;
+; CHECK_LIMIT_4-LABEL: define void @testB(
+; CHECK_LIMIT_4-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_4-NEXT: [[MAINA:.*:]]
+; CHECK_LIMIT_4-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_4: [[IFA]]:
+; CHECK_LIMIT_4-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_4-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM1]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM2]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_4: [[IFB_JOIN]]:
+; CHECK_LIMIT_4-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_4-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_4: [[MAINC]]:
+; CHECK_LIMIT_4-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; Use of %value is not in either immediate destination of mainB.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ store i128 0, ptr %ptr_arm1
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ store i128 0, ptr %ptr_arm2
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
diff --git a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
index efc04e9..74693c1 100644
--- a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
+++ b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
@@ -7,11 +7,16 @@ define hidden void @Func() !type !0 {
ret void
}
-; CHECK1: !aliases = !{![[A1:[0-9]+]], ![[A2:[0-9]+]], ![[A3:[0-9]+]]}
+; CHECK1: !cfi.functions = !{![[F1:[0-9]+]], ![[F2:[0-9]+]], ![[F3:[0-9]+]], ![[F4:[0-9]+]]}
+; CHECK1: !aliases = !{![[A:[0-9]+]]}
-; CHECK1: ![[A1]] = !{!"Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A2]] = !{!"Hidden_Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A3]] = !{!"Weak_Alias", !"Func", i8 0, i8 1}
+; CHECK1: ![[F1]] = !{!"Func", i8 0, ![[T:[0-9]+]]}
+; CHECK1: ![[T]] = !{i64 0, !"_ZTSFvvE"}
+; CHECK1: ![[F2]] = !{!"Alias", i8 0, ![[T]]}
+; CHECK1: ![[F3]] = !{!"Hidden_Alias", i8 0, ![[T]]}
+; CHECK1: ![[F4]] = !{!"Weak_Alias", i8 0, ![[T]]}
+;
+; CHECK1: ![[A]] = !{!"Func", !"Alias", !"Hidden_Alias", !"Weak_Alias"}
@Alias = hidden alias void (), ptr @Func
@Hidden_Alias = hidden alias void (), ptr @Func
@Weak_Alias = weak alias void (), ptr @Func
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
new file mode 100644
index 0000000..78dd74c
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "SPIRV" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
new file mode 100644
index 0000000..6f4c80d
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
@@ -0,0 +1,889 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=vector-combine -data-layout=E -mtriple=spirv-unknown-vulkan1.3-library %s | FileCheck %s --check-prefix=SPIRV
+
+define void @insert_store(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q, align 16
+ ret void
+}
+
+define void @insert_store_i16_align1(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_i16_align1(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q, align 1
+ ret void
+}
+
+; To verify case when index is out of bounds
+define void @insert_store_outofbounds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_outofbounds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 9
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_vscale(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 3
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case that index exceeds the minimum number
+; of elements of a scalable vector type.
+define void @insert_store_vscale_exceeds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale_exceeds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 9
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_v9i4(ptr %q, i4 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v9i4(
+; SPIRV-SAME: ptr [[Q:%.*]], i4 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <9 x i4>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <9 x i4> [[TMP0]], i4 [[S]], i32 3
+; SPIRV-NEXT: store <9 x i4> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <9 x i4>, ptr %q
+ %vecins = insertelement <9 x i4> %0, i4 %s, i32 3
+ store <9 x i4> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v4i27(ptr %q, i27 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v4i27(
+; SPIRV-SAME: ptr [[Q:%.*]], i27 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <4 x i27>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i27> [[TMP0]], i27 [[S]], i32 3
+; SPIRV-NEXT: store <4 x i27> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <4 x i27>, ptr %q
+ %vecins = insertelement <4 x i27> %0, i27 %s, i32 3
+ store <4 x i27> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v32i1(ptr %p) {
+; SPIRV-LABEL: define void @insert_store_v32i1(
+; SPIRV-SAME: ptr [[P:%.*]]) {
+; SPIRV-NEXT: [[VEC:%.*]] = load <32 x i1>, ptr [[P]], align 4
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <32 x i1> [[VEC]], i1 true, i64 0
+; SPIRV-NEXT: store <32 x i1> [[INS]], ptr [[P]], align 4
+; SPIRV-NEXT: ret void
+;
+ %vec = load <32 x i1>, ptr %p
+ %ins = insertelement <32 x i1> %vec, i1 true, i64 0
+ store <32 x i1> %ins, ptr %p
+ ret void
+}
+
+define void @insert_store_blk_differ(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_blk_differ(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: br label %[[CONT:.*]]
+; SPIRV: [[CONT]]:
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ br label %cont
+cont:
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case that the index is not a constant, and
+; the vector type is scalable.
+define void @insert_store_vscale_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify align here is narrowed to scalar store size
+define void @insert_store_nonconst_large_alignment(ptr %q, i32 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_large_alignment(
+; SPIRV-SAME: ptr [[Q:%.*]], i32 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <4 x i32>, ptr [[Q]], align 128
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i32> [[I]], i32 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <4 x i32> [[VECINS]], ptr [[Q]], align 128
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %i = load <4 x i32>, ptr %q, align 128
+ %vecins = insertelement <4 x i32> %i, i32 %s, i32 %idx
+ store <4 x i32> %vecins, ptr %q, align 128
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_8(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_8(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 8
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 8
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 8
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_4(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_4(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 4
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 4
+ ret void
+}
+
+define void @insert_store_nonconst_align_larger(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_larger(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 2
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 2
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @maythrow() readnone
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: call void @maythrow()
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ %0 = load <16 x i8>, ptr %q
+ call void @maythrow()
+ call void @llvm.assume(i1 %cmp)
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @llvm.assume(i1)
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX_FROZEN]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = and i32 %idx.frozen, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 31
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 31
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant but valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX_FROZEN]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = urem i32 %idx.frozen, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the index is not a constant and may not be valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_ptr_strip(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_ptr_strip(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @volatile_update(ptr %q, ptr %p, i8 zeroext %s) {
+; SPIRV-LABEL: define void @volatile_update(
+; SPIRV-SAME: ptr [[Q:%.*]], ptr [[P:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS0:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store volatile <16 x i8> [[VECINS0]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[VECINS1:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[S]], i32 1
+; SPIRV-NEXT: store <16 x i8> [[VECINS1]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins0 = insertelement <16 x i8> %0, i8 %s, i32 3
+ store volatile <16 x i8> %vecins0, ptr %q
+
+ %1 = load volatile <16 x i8>, ptr %p
+ %vecins1 = insertelement <16 x i8> %1, i8 %s, i32 1
+ store <16 x i8> %vecins1, ptr %p
+ ret void
+}
+
+define void @insert_store_addr_differ(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_addr_differ(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %q
+ ret void
+}
+
+; We can't transform if any instr could modify memory in between.
+define void @insert_store_mem_modify(ptr %p, ptr %q, ptr noalias %r, i8 %s, i32 %m) {
+; SPIRV-LABEL: define void @insert_store_mem_modify(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr noalias [[R:%.*]], i8 [[S:%.*]], i32 [[M:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[Q]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[R]], align 16
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[LD3:%.*]] = load <4 x i32>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS3:%.*]] = insertelement <4 x i32> [[LD3]], i32 [[M]], i32 0
+; SPIRV-NEXT: store <4 x i32> [[INS3]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ ; p may alias q
+ %ld = load <16 x i8>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %q
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+
+ ; p never aliases r
+ %ld2 = load <16 x i8>, ptr %q
+ store <16 x i8> zeroinitializer, ptr %r
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %q
+
+  ; the store must alias the load (both use p)
+ %ld3 = load <4 x i32>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %p
+ %ins3 = insertelement <4 x i32> %ld3, i32 %m, i32 0
+ store <4 x i32> %ins3, ptr %p
+
+ ret void
+}
+
+; Check cases when calls may modify memory
+define void @insert_store_with_call(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_with_call(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @maywrite(ptr [[P]])
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: call void @foo()
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @nowrite(ptr [[P]])
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ call void @maywrite(ptr %p)
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+ call void @foo() ; Barrier
+ %ld2 = load <16 x i8>, ptr %p
+ call void @nowrite(ptr %p)
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %p
+ ret void
+}
+
+declare void @foo()
+declare void @maywrite(ptr)
+declare void @nowrite(ptr) readonly
+
+; To test that the combine bails out when the number of instructions
+; in between exceeds the limit (default 30).
+define i32 @insert_store_maximum_scan_instrs(i32 %arg, ptr %arg1, ptr %arg2, i8 zeroext %arg3) {
+; SPIRV-LABEL: define i32 @insert_store_maximum_scan_instrs(
+; SPIRV-SAME: i32 [[ARG:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i8 zeroext [[ARG3:%.*]]) {
+; SPIRV-NEXT: [[BB:.*:]]
+; SPIRV-NEXT: [[I:%.*]] = or i32 [[ARG]], 1
+; SPIRV-NEXT: [[I4:%.*]] = load <16 x i8>, ptr [[ARG2]], align 16
+; SPIRV-NEXT: [[I5:%.*]] = tail call i32 @bar(i32 [[I]], i1 true)
+; SPIRV-NEXT: [[I6:%.*]] = shl i32 [[ARG]], [[I5]]
+; SPIRV-NEXT: [[I7:%.*]] = lshr i32 [[I6]], 26
+; SPIRV-NEXT: [[I8:%.*]] = trunc i32 [[I7]] to i8
+; SPIRV-NEXT: [[I9:%.*]] = and i8 [[I8]], 31
+; SPIRV-NEXT: [[I10:%.*]] = lshr i32 [[I6]], 11
+; SPIRV-NEXT: [[I11:%.*]] = and i32 [[I10]], 32767
+; SPIRV-NEXT: [[I12:%.*]] = zext i8 [[I9]] to i64
+; SPIRV-NEXT: [[I13:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I12]]
+; SPIRV-NEXT: [[I14:%.*]] = load i16, ptr [[I13]], align 2
+; SPIRV-NEXT: [[I15:%.*]] = zext i16 [[I14]] to i32
+; SPIRV-NEXT: [[I16:%.*]] = add nuw nsw i8 [[I9]], 1
+; SPIRV-NEXT: [[I17:%.*]] = zext i8 [[I16]] to i64
+; SPIRV-NEXT: [[I18:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I17]]
+; SPIRV-NEXT: [[I19:%.*]] = load i16, ptr [[I18]], align 2
+; SPIRV-NEXT: [[I20:%.*]] = zext i16 [[I19]] to i32
+; SPIRV-NEXT: [[I21:%.*]] = sub nsw i32 [[I20]], [[I15]]
+; SPIRV-NEXT: [[I22:%.*]] = mul nsw i32 [[I11]], [[I21]]
+; SPIRV-NEXT: [[I23:%.*]] = ashr i32 [[I22]], 15
+; SPIRV-NEXT: [[I24:%.*]] = shl nuw nsw i32 [[I5]], 15
+; SPIRV-NEXT: [[I25:%.*]] = xor i32 [[I24]], 1015808
+; SPIRV-NEXT: [[I26:%.*]] = add nuw nsw i32 [[I25]], [[I15]]
+; SPIRV-NEXT: [[I27:%.*]] = add nsw i32 [[I26]], [[I23]]
+; SPIRV-NEXT: [[I28:%.*]] = sitofp i32 [[ARG]] to double
+; SPIRV-NEXT: [[I29:%.*]] = tail call double @llvm.log2.f64(double [[I28]])
+; SPIRV-NEXT: [[I30:%.*]] = fptosi double [[I29]] to i32
+; SPIRV-NEXT: [[I31:%.*]] = shl nsw i32 [[I30]], 15
+; SPIRV-NEXT: [[I32:%.*]] = or i32 [[I31]], 4
+; SPIRV-NEXT: [[I33:%.*]] = icmp eq i32 [[I27]], [[I32]]
+; SPIRV-NEXT: [[I34:%.*]] = select i1 [[I33]], i32 [[ARG]], i32 [[I31]]
+; SPIRV-NEXT: [[I35:%.*]] = lshr i32 [[I34]], 1
+; SPIRV-NEXT: [[I36:%.*]] = insertelement <16 x i8> [[I4]], i8 [[ARG3]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[I36]], ptr [[ARG2]], align 16
+; SPIRV-NEXT: ret i32 [[I35]]
+;
+bb:
+ %i = or i32 %arg, 1
+ %i4 = load <16 x i8>, ptr %arg2, align 16
+ %i5 = tail call i32 @bar(i32 %i, i1 true)
+ %i6 = shl i32 %arg, %i5
+ %i7 = lshr i32 %i6, 26
+ %i8 = trunc i32 %i7 to i8
+ %i9 = and i8 %i8, 31
+ %i10 = lshr i32 %i6, 11
+ %i11 = and i32 %i10, 32767
+ %i12 = zext i8 %i9 to i64
+ %i13 = getelementptr inbounds i16, ptr %arg1, i64 %i12
+ %i14 = load i16, ptr %i13, align 2
+ %i15 = zext i16 %i14 to i32
+ %i16 = add nuw nsw i8 %i9, 1
+ %i17 = zext i8 %i16 to i64
+ %i18 = getelementptr inbounds i16, ptr %arg1, i64 %i17
+ %i19 = load i16, ptr %i18, align 2
+ %i20 = zext i16 %i19 to i32
+ %i21 = sub nsw i32 %i20, %i15
+ %i22 = mul nsw i32 %i11, %i21
+ %i23 = ashr i32 %i22, 15
+ %i24 = shl nuw nsw i32 %i5, 15
+ %i25 = xor i32 %i24, 1015808
+ %i26 = add nuw nsw i32 %i25, %i15
+ %i27 = add nsw i32 %i26, %i23
+ %i28 = sitofp i32 %arg to double
+ %i29 = tail call double @llvm.log2.f64(double %i28)
+ %i30 = fptosi double %i29 to i32
+ %i31 = shl nsw i32 %i30, 15
+ %i32 = or i32 %i31, 4
+ %i33 = icmp eq i32 %i27, %i32
+ %i34 = select i1 %i33, i32 %arg, i32 %i31
+ %i35 = lshr i32 %i34, 1
+ %i36 = insertelement <16 x i8> %i4, i8 %arg3, i32 3
+ store <16 x i8> %i36, ptr %arg2, align 16
+ ret i32 %i35
+}
+
+declare i32 @bar(i32, i1) readonly
+declare double @llvm.log2.f64(double)
+
diff --git a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
index f2fe794..db223cd 100644
--- a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
@@ -5,7 +5,13 @@
# RUN: yaml2obj %t/stmt_seq_macho.o.yaml -o %t/stmt_seq_macho.o
# RUN: dsymutil --flat --verify-dwarf=none -oso-prepend-path %t %t/stmt_seq_macho.exe -o %t/stmt_seq_macho.dSYM
# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM > %t/stmt_seq_macho.dSYM.txt
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | FileCheck %s -check-prefix=CHECK_NO_INVALID_OFFSET
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | grep DW_AT_LLVM_stmt_sequence | sort | uniq -d | wc -l | FileCheck %s -check-prefix=CHECK_NO_DUPLICATES
+# CHECK_NO_DUPLICATES: 0
+# CHECK_NO_INVALID_OFFSET-NOT: DW_AT_LLVM_stmt_sequence{{.*}}0xfffffff
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET1:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET2:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET3:(0x[0-9a-f]+)]])
@@ -18,6 +24,9 @@
#--- stmt_seq_macho.cpp
#define ATTRIB extern "C" __attribute__((noinline))
+ATTRIB int function1_copy1(int a) {
+ return ++a;
+}
ATTRIB int function3_copy1(int a) {
int b = a + 3;
@@ -51,6 +60,7 @@ int main() {
sum += function2_copy2(3);
sum += function3_copy2(41);
sum += function2_copy1(11);
+ sum += function1_copy1(42);
length_error e("test");
return sum;
}
@@ -108,9 +118,9 @@ LoadCommands:
cmdsize: 1032
segname: ''
vmaddr: 0
- vmsize: 2793
+ vmsize: 3125
fileoff: 1208
- filesize: 2793
+ filesize: 3125
maxprot: 7
initprot: 7
nsects: 12
@@ -119,18 +129,18 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x0
- size: 128
+ size: 148
offset: 0x4B8
align: 2
- reloff: 0xFA8
- nreloc: 7
+ reloff: 0x10F0
+ nreloc: 8
flags: 0x80000400
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
+ content: 00040011C0035FD600100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA40058052000000947302000B0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
relocations:
- - address: 0x78
+ - address: 0x8C
symbolnum: 4
pcrel: true
length: 2
@@ -138,7 +148,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x60
+ - address: 0x74
symbolnum: 3
pcrel: true
length: 2
@@ -146,7 +156,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x58
+ - address: 0x6C
symbolnum: 1
pcrel: false
length: 2
@@ -154,7 +164,7 @@ LoadCommands:
type: 4
scattered: false
value: 0
- - address: 0x54
+ - address: 0x68
symbolnum: 1
pcrel: true
length: 2
@@ -162,7 +172,7 @@ LoadCommands:
type: 3
scattered: false
value: 0
- - address: 0x4C
+ - address: 0x60
symbolnum: 5
pcrel: true
length: 2
@@ -170,16 +180,24 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x40
- symbolnum: 8
+ - address: 0x54
+ symbolnum: 6
pcrel: true
length: 2
extern: true
type: 2
scattered: false
value: 0
- - address: 0x34
- symbolnum: 6
+ - address: 0x48
+ symbolnum: 9
+ pcrel: true
+ length: 2
+ extern: true
+ type: 2
+ scattered: false
+ value: 0
+ - address: 0x3C
+ symbolnum: 7
pcrel: true
length: 2
extern: true
@@ -188,9 +206,9 @@ LoadCommands:
value: 0
- sectname: __cstring
segname: __TEXT
- addr: 0x80
+ addr: 0x94
size: 5
- offset: 0x538
+ offset: 0x54C
align: 0
reloff: 0x0
nreloc: 0
@@ -201,9 +219,9 @@ LoadCommands:
content: '7465737400'
- sectname: __debug_loc
segname: __DWARF
- addr: 0x85
+ addr: 0x99
size: 412
- offset: 0x53D
+ offset: 0x551
align: 0
reloff: 0x0
nreloc: 0
@@ -211,12 +229,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00000000000000000400000000000000010050040000000000000008000000000000000400A301509F0000000000000000000000000000000000000000000000000400000000000000030070039F0000000000000000000000000000000008000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000030070039F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F000000000000000000000000000000001C0000000000000020000000000000000100500000000000000000000000000000000030000000000000003C00000000000000030011009F3C0000000000000048000000000000000100634800000000000000540000000000000001006400000000000000000000000000000000
+ content: 08000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000008000000000000000C00000000000000030070039F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F0000000000000000000000000000000018000000000000001C00000000000000030070039F0000000000000000000000000000000020000000000000002400000000000000010050240000000000000028000000000000000400A301509F00000000000000000000000000000000240000000000000028000000000000000100500000000000000000000000000000000038000000000000004400000000000000030011009F4400000000000000500000000000000001006350000000000000005C0000000000000001006400000000000000000000000000000000
- sectname: __debug_abbrev
segname: __DWARF
- addr: 0x221
- size: 359
- offset: 0x6D9
+ addr: 0x235
+ size: 372
+ offset: 0x6ED
align: 0
reloff: 0x0
nreloc: 0
@@ -226,18 +244,34 @@ LoadCommands:
reserved3: 0x0
- sectname: __debug_info
segname: __DWARF
- addr: 0x388
- size: 686
- offset: 0x840
+ addr: 0x3A9
+ size: 747
+ offset: 0x861
align: 0
- reloff: 0xFE0
- nreloc: 14
+ reloff: 0x1130
+ nreloc: 16
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0x26A
+ - address: 0x2A7
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x28E
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x253
symbolnum: 1
pcrel: false
length: 3
@@ -245,7 +279,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x251
+ - address: 0x1F5
symbolnum: 1
pcrel: false
length: 3
@@ -253,7 +287,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x216
+ - address: 0x1E1
symbolnum: 1
pcrel: false
length: 3
@@ -261,7 +295,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1B8
+ - address: 0x1CE
symbolnum: 1
pcrel: false
length: 3
@@ -269,7 +303,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1A5
+ - address: 0x1BA
symbolnum: 1
pcrel: false
length: 3
@@ -277,7 +311,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x191
+ - address: 0x1A7
symbolnum: 1
pcrel: false
length: 3
@@ -285,7 +319,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x17E
+ - address: 0x169
symbolnum: 1
pcrel: false
length: 3
@@ -293,7 +327,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x140
+ - address: 0x12D
symbolnum: 1
pcrel: false
length: 3
@@ -301,7 +335,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x104
+ - address: 0xF1
symbolnum: 1
pcrel: false
length: 3
@@ -309,7 +343,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xC8
+ - address: 0xC4
symbolnum: 1
pcrel: false
length: 3
@@ -317,7 +351,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x9B
+ - address: 0x88
symbolnum: 1
pcrel: false
length: 3
@@ -351,9 +385,9 @@ LoadCommands:
value: 0
- sectname: __debug_str
segname: __DWARF
- addr: 0x636
- size: 239
- offset: 0xAEE
+ addr: 0x694
+ size: 400
+ offset: 0xB4C
align: 0
reloff: 0x0
nreloc: 0
@@ -363,9 +397,9 @@ LoadCommands:
reserved3: 0x0
- sectname: __apple_names
segname: __DWARF
- addr: 0x725
- size: 260
- offset: 0xBDD
+ addr: 0x824
+ size: 288
+ offset: 0xCDC
align: 0
reloff: 0x0
nreloc: 0
@@ -373,12 +407,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 485341480100000008000000080000000C000000000000000100000001000600000000000200000005000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF90D9F86F88CB36CF4908311CD1125E5389CB36CF4A08311C522B70536A7F9A7C8000000094000000A4000000B4000000C4000000D4000000E4000000F40000008A0000000200000015020000690200000000000055000000010000009A0000000000000045000000010000005E00000000000000A3000000010000001502000000000000750000000100000003010000000000006500000001000000C700000000000000BB00000001000000690200000000000085000000010000003F01000000000000
+ content: 485341480100000009000000090000000C00000000000000010000000100060000000000FFFFFFFFFFFFFFFF0100000003000000040000000600000007000000080000004A08311CC78E3C8288CB36CF89CB36CFD1125E53522B705390D9F86F6A7F9A7C4908311C8C0000009C000000AC000000BC000000CC000000DC000000EC00000000010000100100000601000001000000F000000000000000D6000000010000005E00000000000000F600000001000000C30000000000000016010000010000002C01000000000000440100000100000052020000000000005C01000001000000A6020000000000002B0100000200000052020000A60200000000000026010000010000006801000000000000E6000000010000008700000000000000
- sectname: __apple_objc
segname: __DWARF
- addr: 0x829
+ addr: 0x944
size: 36
- offset: 0xCE1
+ offset: 0xDFC
align: 0
reloff: 0x0
nreloc: 0
@@ -389,9 +423,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_namespac
segname: __DWARF
- addr: 0x84D
+ addr: 0x968
size: 36
- offset: 0xD05
+ offset: 0xE20
align: 0
reloff: 0x0
nreloc: 0
@@ -402,9 +436,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_types
segname: __DWARF
- addr: 0x871
+ addr: 0x98C
size: 195
- offset: 0xD29
+ offset: 0xE44
align: 0
reloff: 0x0
nreloc: 0
@@ -412,21 +446,29 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B00000009700000001000000EA010000130000000000008A00000001000000C80100001300000000000031000000010000005700000024000000000000D300000001000000A1020000240000000000002C000000010000005000000024000000000000
+ content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B0000000380100000100000027020000130000000000002B010000010000000502000013000000000000C20000000100000057000000240000000000007401000001000000DE02000024000000000000BD000000010000005000000024000000000000
- sectname: __debug_frame
segname: __DWARF
- addr: 0x938
- size: 208
- offset: 0xDF0
+ addr: 0xA50
+ size: 232
+ offset: 0xF08
align: 3
- reloff: 0x1050
- nreloc: 7
+ reloff: 0x11B0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000240000000000000020000000000000005800000000000000500C1D109E019D02930394040000000014000000000000007800000000000000040000000000000014000000000000007C000000000000000400000000000000
+ content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000140000000000000020000000000000000800000000000000240000000000000028000000000000006400000000000000500C1D109E019D02930394040000000014000000000000008C000000000000000400000000000000140000000000000090000000000000000400000000000000
relocations:
+ - address: 0xD8
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
- address: 0xC0
symbolnum: 1
pcrel: false
@@ -435,7 +477,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xA8
+ - address: 0x98
symbolnum: 1
pcrel: false
length: 3
@@ -485,18 +527,26 @@ LoadCommands:
value: 0
- sectname: __debug_line
segname: __DWARF
- addr: 0xA08
- size: 225
- offset: 0xEC0
+ addr: 0xB38
+ size: 253
+ offset: 0xFF0
align: 0
- reloff: 0x1088
- nreloc: 7
+ reloff: 0x11F0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0xD1
+ - address: 0xED
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0xD9
symbolnum: 1
pcrel: false
length: 3
@@ -504,7 +554,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xBD
+ - address: 0xAA
symbolnum: 1
pcrel: false
length: 3
@@ -512,7 +562,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x92
+ - address: 0x96
symbolnum: 1
pcrel: false
length: 3
@@ -560,21 +610,21 @@ LoadCommands:
ntools: 0
- cmd: LC_LINKER_OPTIMIZATION_HINT
cmdsize: 16
- dataoff: 4288
+ dataoff: 4656
datasize: 8
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 4296
- nsyms: 10
- stroff: 4456
- strsize: 144
+ symoff: 4664
+ nsyms: 11
+ stroff: 4840
+ strsize: 168
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
nlocalsym: 3
iextdefsym: 3
- nextdefsym: 7
- iundefsym: 10
+ nextdefsym: 8
+ iundefsym: 11
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -590,7 +640,7 @@ LoadCommands:
nlocrel: 0
LinkEditData:
NameList:
- - n_strx: 138
+ - n_strx: 155
n_type: 0xE
n_sect: 1
n_desc: 0
@@ -599,47 +649,52 @@ LinkEditData:
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
- - n_strx: 132
+ n_value: 148
+ - n_strx: 149
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
+ n_value: 148
- n_strx: 39
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 120
+ n_value: 140
- n_strx: 14
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 124
+ n_value: 144
+ - n_strx: 132
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 0
- n_strx: 115
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 8
+ n_value: 16
- n_strx: 81
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 24
+ n_value: 32
- n_strx: 98
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 0
+ n_value: 8
- n_strx: 64
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 16
+ n_value: 24
- n_strx: 8
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 32
+ n_value: 40
StringTable:
- ''
- l_.str
@@ -650,16 +705,25 @@ LinkEditData:
- _function2_copy2
- _function3_copy1
- _function2_copy1
+ - _function1_copy1
- ltmp1
- ltmp0
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
DWARF:
debug_str:
- - ''
+ - 'Facebook clang version 19.1.5 (https://git.internal.tfbnw.net/repos/git/rw/osmeta/external/llvm-project b36c9ae1f8f2b39e4aafb9ca4700c608c3036365)'
- stmt_seq_macho.cpp
- '/'
- '/private/tmp/stmt_seq'
- char
- __ARRAY_SIZE_TYPE__
+ - function1_copy1
- function3_copy1
- function2_copy1
- function3_copy2
@@ -786,6 +850,18 @@ DWARF:
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_decl_file
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_decl_line
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ - Code: 0xA
+ Tag: DW_TAG_formal_parameter
+ Children: DW_CHILDREN_no
+ Attributes:
- Attribute: DW_AT_location
Form: DW_FORM_sec_offset
- Attribute: DW_AT_name
@@ -796,7 +872,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xA
+ - Code: 0xB
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -810,7 +886,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xB
+ - Code: 0xC
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -836,7 +912,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0xC
+ - Code: 0xD
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -850,7 +926,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xD
+ - Code: 0xE
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -858,7 +934,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_call_return_pc
Form: DW_FORM_addr
- - Code: 0xE
+ - Code: 0xF
Tag: DW_TAG_call_site_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -866,7 +942,7 @@ DWARF:
Form: DW_FORM_exprloc
- Attribute: DW_AT_call_value
Form: DW_FORM_exprloc
- - Code: 0xF
+ - Code: 0x10
Tag: DW_TAG_structure_type
Children: DW_CHILDREN_yes
Attributes:
@@ -880,7 +956,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_decl_line
Form: DW_FORM_data1
- - Code: 0x10
+ - Code: 0x11
Tag: DW_TAG_inheritance
Children: DW_CHILDREN_no
Attributes:
@@ -888,7 +964,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_data_member_location
Form: DW_FORM_data1
- - Code: 0x11
+ - Code: 0x12
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -906,7 +982,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_explicit
Form: DW_FORM_flag_present
- - Code: 0x12
+ - Code: 0x13
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -914,13 +990,13 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x13
+ - Code: 0x14
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x14
+ - Code: 0x15
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -936,13 +1012,13 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0x15
+ - Code: 0x16
Tag: DW_TAG_pointer_type
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x16
+ - Code: 0x17
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -964,7 +1040,7 @@ DWARF:
Form: DW_FORM_strp
- Attribute: DW_AT_specification
Form: DW_FORM_ref4
- - Code: 0x17
+ - Code: 0x18
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -976,7 +1052,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x18
+ - Code: 0x19
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -990,7 +1066,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x19
+ - Code: 0x1A
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -1001,7 +1077,7 @@ DWARF:
- Attribute: DW_AT_call_pc
Form: DW_FORM_addr
debug_info:
- - Length: 0x2AA
+ - Length: 0x2E7
Version: 4
AbbrevTableID: 0
AbbrOffset: 0x0
@@ -1011,20 +1087,20 @@ DWARF:
Values:
- Value: 0x0
- Value: 0x21
- - Value: 0x1
- - Value: 0x14
+ - Value: 0x92
+ - Value: 0xA5
- Value: 0x0
- - Value: 0x16
+ - Value: 0xA7
- Value: 0x1
- Value: 0x0
- - Value: 0x80
+ - Value: 0x94
- AbbrCode: 0x2
Values:
- Value: 0x3F
- Value: 0x1
- - Value: 0x23
+ - Value: 0x27
- Value: 0x9
- BlockData: [ 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ BlockData: [ 0x3, 0x94, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0 ]
- AbbrCode: 0x3
Values:
@@ -1039,12 +1115,12 @@ DWARF:
- Value: 0x50
- AbbrCode: 0x6
Values:
- - Value: 0x2C
+ - Value: 0xBD
- Value: 0x6
- Value: 0x1
- AbbrCode: 0x7
Values:
- - Value: 0x31
+ - Value: 0xC2
- Value: 0x8
- Value: 0x7
- AbbrCode: 0x8
@@ -1056,285 +1132,318 @@ DWARF:
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x45
+ - Value: 0xD6
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
+ - Value: 0x2
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- AbbrCode: 0x9
Values:
- - Value: 0x0
- - Value: 0xD7
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x2
+ - Value: 0x2DE
+ - AbbrCode: 0x0
+ - AbbrCode: 0x8
+ Values:
+ - Value: 0x8
+ - Value: 0x8
+ - Value: 0x1
+ - Value: 0x4A
+ - Value: 0x1
+ BlockData: [ 0x6F ]
+ - Value: 0x1
+ - Value: 0xE6
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - Value: 0x1
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
- AbbrCode: 0xA
Values:
+ - Value: 0x0
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - AbbrCode: 0xB
+ Values:
- Value: 0x39
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0x4
- - Value: 0x2A1
+ - Value: 0x7
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x8
+ - Value: 0x10
- Value: 0x8
- Value: 0x1
- - Value: 0x4A
+ - Value: 0x60
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x55
+ - Value: 0xF6
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x5E
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x10
+ - Value: 0x18
- Value: 0x8
- Value: 0x1
- - Value: 0x60
+ - Value: 0x78
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x65
+ - Value: 0x106
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
+ - Value: 0xF
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x97
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0xF
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0xD0
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0xD
- - Value: 0x2A1
+ - Value: 0x10
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x18
+ - Value: 0x20
- Value: 0x8
- Value: 0x1
- - Value: 0x78
+ - Value: 0x90
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x75
+ - Value: 0x116
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
+ - Value: 0x14
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0xF5
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0x14
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0x12E
- - Value: 0xDB
+ - Value: 0x17C
- Value: 0x1
- - Value: 0x12
- - Value: 0x2A1
+ - Value: 0x15
+ - Value: 0x2DE
- AbbrCode: 0x0
- - AbbrCode: 0xB
+ - AbbrCode: 0xC
Values:
- - Value: 0x20
- - Value: 0x58
- - Value: 0x8F
+ - Value: 0x28
+ - Value: 0x64
+ - Value: 0xA7
- Value: 0x1
BlockData: [ 0x6D ]
- Value: 0x1
- - Value: 0x85
+ - Value: 0x126
- Value: 0x1
- - Value: 0x1E
- - Value: 0x2A1
+ - Value: 0x21
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0xC
+ - AbbrCode: 0xD
Values:
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- - Value: 0xE2
+ - Value: 0x183
- Value: 0x1
- - Value: 0x23
- - Value: 0x1C8
- - AbbrCode: 0xA
+ - Value: 0x27
+ - Value: 0x205
+ - AbbrCode: 0xB
Values:
- Value: 0x151
- - Value: 0xE4
+ - Value: 0x185
- Value: 0x1
- - Value: 0x1F
- - Value: 0x2A1
- - AbbrCode: 0xD
- Values:
- - Value: 0x103
- - Value: 0x38
+ - Value: 0x22
+ - Value: 0x2DE
- AbbrCode: 0xE
Values:
+ - Value: 0x12C
+ - Value: 0x40
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x33 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0xC7
- - Value: 0x44
- AbbrCode: 0xE
Values:
+ - Value: 0xF0
+ - Value: 0x4C
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x10, 0x29 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0x9A
- - Value: 0x50
- AbbrCode: 0xE
Values:
+ - Value: 0xC3
+ - Value: 0x58
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x3B ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
+ - AbbrCode: 0xE
Values:
- - Value: 0x215
+ - Value: 0x5E
- Value: 0x64
+ - AbbrCode: 0xF
+ Values:
+ - Value: 0x1
+ BlockData: [ 0x50 ]
+ - Value: 0x2
+ BlockData: [ 0x10, 0x2A ]
+ - AbbrCode: 0x0
- AbbrCode: 0xE
Values:
+ - Value: 0x252
+ - Value: 0x78
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- Value: 0x1
- - Value: 0x1A
- - AbbrCode: 0x10
+ - Value: 0x1D
+ - AbbrCode: 0x11
Values:
- - Value: 0x1EA
+ - Value: 0x227
- Value: 0x0
- - AbbrCode: 0x11
+ - AbbrCode: 0x12
Values:
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- - Value: 0x1B
+ - Value: 0x1E
- Value: 0x1
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x210
+ - Value: 0x24D
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- Value: 0x1
- - Value: 0x16
- - AbbrCode: 0x14
+ - Value: 0x19
+ - AbbrCode: 0x15
Values:
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- - Value: 0x17
+ - Value: 0x1A
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x206
+ - Value: 0x243
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1EA
- - AbbrCode: 0x15
+ - Value: 0x227
+ - AbbrCode: 0x16
Values:
- Value: 0x4B
- - AbbrCode: 0x15
- Values:
- - Value: 0x1C8
- AbbrCode: 0x16
Values:
- - Value: 0x78
+ - Value: 0x205
+ - AbbrCode: 0x17
+ Values:
+ - Value: 0x8C
- Value: 0x4
- Value: 0x1
- - Value: 0xB7
+ - Value: 0xD3
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x234
+ - Value: 0x271
- Value: 0x1
- - Value: 0xA3
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x144
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
- - AbbrCode: 0x19
+ - Value: 0x1E
+ - Value: 0x248
+ - AbbrCode: 0x1A
Values:
- - Value: 0x269
+ - Value: 0x2A6
- Value: 0x1
- - Value: 0x78
- - AbbrCode: 0xE
+ - Value: 0x8C
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x3
BlockData: [ 0xA3, 0x1, 0x50 ]
- - AbbrCode: 0xE
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x51 ]
@@ -1342,45 +1451,45 @@ DWARF:
BlockData: [ 0xA3, 0x1, 0x51 ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x16
+ - AbbrCode: 0x17
Values:
- - Value: 0x7C
+ - Value: 0x90
- Value: 0x4
- Value: 0x1
- - Value: 0xCB
+ - Value: 0xE7
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x288
+ - Value: 0x2C5
- Value: 0x1
- - Value: 0xBB
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x15C
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
+ - Value: 0x1E
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x6
Values:
- - Value: 0xD3
+ - Value: 0x174
- Value: 0x5
- Value: 0x4
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1C8
+ - Value: 0x205
- AbbrCode: 0x0
debug_line:
- - Length: 221
+ - Length: 249
Version: 4
PrologueLength: 42
MinInstLength: 1
@@ -1397,17 +1506,17 @@ DWARF:
Length: 0
Opcodes:
- Opcode: DW_LNS_set_column
- Data: 14
+ Data: 10
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 0
- - Opcode: 0x16
+ - Opcode: 0x14
Data: 0
- Opcode: DW_LNS_set_column
- Data: 5
+ Data: 3
- Opcode: DW_LNS_negate_stmt
Data: 0
- Opcode: 0x4A
@@ -1424,7 +1533,7 @@ DWARF:
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 8
- - Opcode: 0x1A
+ - Opcode: 0x19
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
@@ -1445,7 +1554,7 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 16
- Opcode: DW_LNS_advance_line
- SData: 13
+ SData: 11
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1460,7 +1569,7 @@ DWARF:
SubOpcode: DW_LNE_end_sequence
Data: 0
- Opcode: DW_LNS_set_column
- Data: 20
+ Data: 14
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
@@ -1468,24 +1577,47 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 24
- Opcode: DW_LNS_advance_line
- SData: 17
+ SData: 16
Data: 0
- Opcode: DW_LNS_copy
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
- - Opcode: 0x4B
+ - Opcode: DW_LNS_negate_stmt
+ Data: 0
+ - Opcode: 0x4A
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 1
SubOpcode: DW_LNE_end_sequence
Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 20
+ - Opcode: DW_LNS_set_prologue_end
+ Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 32
- Opcode: DW_LNS_advance_line
- SData: 29
+ SData: 20
+ Data: 0
+ - Opcode: DW_LNS_copy
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 5
+ - Opcode: 0x4B
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 1
+ SubOpcode: DW_LNE_end_sequence
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 9
+ SubOpcode: DW_LNE_set_address
+ Data: 40
+ - Opcode: DW_LNS_advance_line
+ SData: 32
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1509,9 +1641,15 @@ DWARF:
Data: 0
- Opcode: 0x4B
Data: 0
+ - Opcode: 0xBB
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 9
+ - Opcode: 0x81
+ Data: 0
- Opcode: DW_LNS_set_column
Data: 18
- - Opcode: 0xBB
+ - Opcode: 0x4C
Data: 0
- Opcode: DW_LNS_set_column
Data: 9
@@ -1534,9 +1672,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 120
+ Data: 140
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1551,9 +1689,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 124
+ Data: 144
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1604,7 +1742,7 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x1000002F0
- size: 112
+ size: 132
offset: 0x2F0
align: 2
reloff: 0x0
@@ -1613,12 +1751,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
+ content: 00040011C0035FD600580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA40058052ECFFFF977302000B610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
- sectname: __cstring
segname: __TEXT
- addr: 0x100000360
+ addr: 0x100000374
size: 5
- offset: 0x360
+ offset: 0x374
align: 0
reloff: 0x0
nreloc: 0
@@ -1631,9 +1769,9 @@ LoadCommands:
cmdsize: 72
segname: __LINKEDIT
vmaddr: 4294983680
- vmsize: 960
+ vmsize: 1040
fileoff: 16384
- filesize: 960
+ filesize: 1040
maxprot: 1
initprot: 1
nsects: 0
@@ -1649,20 +1787,20 @@ LoadCommands:
lazy_bind_off: 0
lazy_bind_size: 0
export_off: 16384
- export_size: 96
+ export_size: 112
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 16488
- nsyms: 22
- stroff: 16840
- strsize: 192
+ symoff: 16504
+ nsyms: 25
+ stroff: 16904
+ strsize: 208
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
- nlocalsym: 17
- iextdefsym: 17
- nextdefsym: 5
- iundefsym: 22
+ nlocalsym: 19
+ iextdefsym: 19
+ nextdefsym: 6
+ iundefsym: 25
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -1683,7 +1821,7 @@ LoadCommands:
ZeroPadBytes: 7
- cmd: LC_UUID
cmdsize: 24
- uuid: 4C4C4480-5555-3144-A138-E5DA50CC68DB
+ uuid: 4C4C443F-5555-3144-A15F-DE084AB2A15B
- cmd: LC_BUILD_VERSION
cmdsize: 32
platform: 1
@@ -1692,22 +1830,22 @@ LoadCommands:
ntools: 1
Tools:
- tool: 4
- version: 1376256
+ version: 1245445
- cmd: LC_MAIN
cmdsize: 24
- entryoff: 768
+ entryoff: 776
stacksize: 0
- cmd: LC_FUNCTION_STARTS
cmdsize: 16
- dataoff: 16480
+ dataoff: 16496
datasize: 8
- cmd: LC_DATA_IN_CODE
cmdsize: 16
- dataoff: 16488
+ dataoff: 16504
datasize: 0
- cmd: LC_CODE_SIGNATURE
cmdsize: 16
- dataoff: 17040
+ dataoff: 17120
datasize: 304
LinkEditData:
ExportTrie:
@@ -1738,7 +1876,7 @@ LinkEditData:
NodeOffset: 47
Name: main
Flags: 0x0
- Address: 0x300
+ Address: 0x308
Other: 0x0
ImportName: ''
- TerminalSize: 0
@@ -1749,8 +1887,15 @@ LinkEditData:
Other: 0x0
ImportName: ''
Children:
+ - TerminalSize: 3
+ NodeOffset: 80
+ Name: 1_copy1
+ Flags: 0x0
+ Address: 0x2F0
+ Other: 0x0
+ ImportName: ''
- TerminalSize: 0
- NodeOffset: 71
+ NodeOffset: 85
Name: 2_copy
Flags: 0x0
Address: 0x0
@@ -1758,52 +1903,52 @@ LinkEditData:
ImportName: ''
Children:
- TerminalSize: 3
- NodeOffset: 79
+ NodeOffset: 93
Name: '1'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 84
+ NodeOffset: 98
Name: '2'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 89
+ NodeOffset: 103
Name: 3_copy2
Flags: 0x0
- Address: 0x2F8
+ Address: 0x300
Other: 0x0
ImportName: ''
NameList:
- - n_strx: 129
+ - n_strx: 146
n_type: 0x64
n_sect: 0
n_desc: 0
n_value: 0
- - n_strx: 170
+ - n_strx: 187
n_type: 0x66
n_sect: 0
n_desc: 1
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968152
+ n_value: 4294968172
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
n_value: 4
- - n_strx: 84
+ - n_strx: 101
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1813,12 +1958,12 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
- n_value: 88
+ n_value: 100
- n_strx: 8
n_type: 0x24
n_sect: 1
@@ -1843,7 +1988,17 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968048
+ n_value: 4294968064
+ - n_strx: 1
+ n_type: 0x24
+ n_sect: 0
+ n_desc: 0
+ n_value: 8
+ - n_strx: 59
+ n_type: 0x24
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1854,21 +2009,21 @@ LinkEditData:
n_sect: 1
n_desc: 0
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968152
- - n_strx: 84
+ n_value: 4294968172
+ - n_strx: 101
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 2
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 8
n_type: 0xF
n_sect: 1
@@ -1883,8 +2038,13 @@ LinkEditData:
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968048
- - n_strx: 109
+ n_value: 4294968064
+ - n_strx: 59
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
+ - n_strx: 126
n_type: 0xF
n_sect: 1
n_desc: 16
@@ -1892,6 +2052,7 @@ LinkEditData:
StringTable:
- ' '
- _main
+ - _function1_copy1
- _function2_copy1
- _function3_copy2
- _function2_copy2
@@ -1904,6 +2065,5 @@ LinkEditData:
- ''
- ''
- ''
- - ''
- FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x358, 0x35C ]
+ FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x308, 0x36C, 0x370 ]
...
diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll
new file mode 100644
index 0000000..57c3d6f
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/entities.ll
@@ -0,0 +1,95 @@
+; RUN: llvm-ir2vec --mode=entities | FileCheck %s
+
+CHECK: 92
+CHECK-NEXT: Ret 0
+CHECK-NEXT: Br 1
+CHECK-NEXT: Switch 2
+CHECK-NEXT: IndirectBr 3
+CHECK-NEXT: Invoke 4
+CHECK-NEXT: Resume 5
+CHECK-NEXT: Unreachable 6
+CHECK-NEXT: CleanupRet 7
+CHECK-NEXT: CatchRet 8
+CHECK-NEXT: CatchSwitch 9
+CHECK-NEXT: CallBr 10
+CHECK-NEXT: FNeg 11
+CHECK-NEXT: Add 12
+CHECK-NEXT: FAdd 13
+CHECK-NEXT: Sub 14
+CHECK-NEXT: FSub 15
+CHECK-NEXT: Mul 16
+CHECK-NEXT: FMul 17
+CHECK-NEXT: UDiv 18
+CHECK-NEXT: SDiv 19
+CHECK-NEXT: FDiv 20
+CHECK-NEXT: URem 21
+CHECK-NEXT: SRem 22
+CHECK-NEXT: FRem 23
+CHECK-NEXT: Shl 24
+CHECK-NEXT: LShr 25
+CHECK-NEXT: AShr 26
+CHECK-NEXT: And 27
+CHECK-NEXT: Or 28
+CHECK-NEXT: Xor 29
+CHECK-NEXT: Alloca 30
+CHECK-NEXT: Load 31
+CHECK-NEXT: Store 32
+CHECK-NEXT: GetElementPtr 33
+CHECK-NEXT: Fence 34
+CHECK-NEXT: AtomicCmpXchg 35
+CHECK-NEXT: AtomicRMW 36
+CHECK-NEXT: Trunc 37
+CHECK-NEXT: ZExt 38
+CHECK-NEXT: SExt 39
+CHECK-NEXT: FPToUI 40
+CHECK-NEXT: FPToSI 41
+CHECK-NEXT: UIToFP 42
+CHECK-NEXT: SIToFP 43
+CHECK-NEXT: FPTrunc 44
+CHECK-NEXT: FPExt 45
+CHECK-NEXT: PtrToInt 46
+CHECK-NEXT: IntToPtr 47
+CHECK-NEXT: BitCast 48
+CHECK-NEXT: AddrSpaceCast 49
+CHECK-NEXT: CleanupPad 50
+CHECK-NEXT: CatchPad 51
+CHECK-NEXT: ICmp 52
+CHECK-NEXT: FCmp 53
+CHECK-NEXT: PHI 54
+CHECK-NEXT: Call 55
+CHECK-NEXT: Select 56
+CHECK-NEXT: UserOp1 57
+CHECK-NEXT: UserOp2 58
+CHECK-NEXT: VAArg 59
+CHECK-NEXT: ExtractElement 60
+CHECK-NEXT: InsertElement 61
+CHECK-NEXT: ShuffleVector 62
+CHECK-NEXT: ExtractValue 63
+CHECK-NEXT: InsertValue 64
+CHECK-NEXT: LandingPad 65
+CHECK-NEXT: Freeze 66
+CHECK-NEXT: FloatTy 67
+CHECK-NEXT: FloatTy 68
+CHECK-NEXT: FloatTy 69
+CHECK-NEXT: FloatTy 70
+CHECK-NEXT: FloatTy 71
+CHECK-NEXT: FloatTy 72
+CHECK-NEXT: FloatTy 73
+CHECK-NEXT: VoidTy 74
+CHECK-NEXT: LabelTy 75
+CHECK-NEXT: MetadataTy 76
+CHECK-NEXT: UnknownTy 77
+CHECK-NEXT: TokenTy 78
+CHECK-NEXT: IntegerTy 79
+CHECK-NEXT: FunctionTy 80
+CHECK-NEXT: PointerTy 81
+CHECK-NEXT: StructTy 82
+CHECK-NEXT: ArrayTy 83
+CHECK-NEXT: VectorTy 84
+CHECK-NEXT: VectorTy 85
+CHECK-NEXT: PointerTy 86
+CHECK-NEXT: UnknownTy 87
+CHECK-NEXT: Function 88
+CHECK-NEXT: Pointer 89
+CHECK-NEXT: Constant 90
+CHECK-NEXT: Variable 91
diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.ll b/llvm/test/tools/llvm-ir2vec/error-handling.ll
new file mode 100644
index 0000000..c23c529
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/error-handling.ll
@@ -0,0 +1,22 @@
+; Test error handling and input validation for llvm-ir2vec tool
+
+; RUN: not llvm-ir2vec --mode=embeddings %s 2>&1 | FileCheck %s -check-prefix=CHECK-NO-VOCAB
+
+; RUN: not llvm-ir2vec --mode=embeddings --function=nonexistent --ir2vec-vocab-path=%ir2vec_test_vocab_dir/dummy_3D_nonzero_opc_vocab.json %s 2>&1 | FileCheck %s -check-prefix=CHECK-FUNC-NOT-FOUND
+
+; RUN: llvm-ir2vec --mode=triplets --ir2vec-vocab-path=%ir2vec_test_vocab_dir/dummy_3D_nonzero_opc_vocab.json --level=inst %s 2>&1 | FileCheck %s -check-prefix=CHECK-UNUSED-LEVEL
+; RUN: llvm-ir2vec --mode=entities --level=inst %s 2>&1 | FileCheck %s -check-prefix=CHECK-UNUSED-LEVEL
+
+; RUN: llvm-ir2vec --mode=triplets --ir2vec-vocab-path=%ir2vec_test_vocab_dir/dummy_3D_nonzero_opc_vocab.json --function=dummy %s 2>&1 | FileCheck %s -check-prefix=CHECK-UNUSED-FUNC
+; RUN: llvm-ir2vec --mode=entities --function=dummy %s 2>&1 | FileCheck %s -check-prefix=CHECK-UNUSED-FUNC
+
+; Simple test function for valid IR
+define i32 @test_func(i32 %a) {
+entry:
+ ret i32 %a
+}
+
+; CHECK-NO-VOCAB: error: IR2Vec vocabulary file path not specified; You may need to set it using --ir2vec-vocab-path
+; CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent' not found
+; CHECK-UNUSED-LEVEL: Warning: --level option is ignored
+; CHECK-UNUSED-FUNC: Warning: --function option is ignored
diff --git a/llvm/test/tools/llvm-ir2vec/triplets.ll b/llvm/test/tools/llvm-ir2vec/triplets.ll
index d1ef5b3..dcd1dc9 100644
--- a/llvm/test/tools/llvm-ir2vec/triplets.ll
+++ b/llvm/test/tools/llvm-ir2vec/triplets.ll
@@ -24,15 +24,42 @@ entry:
ret i32 %result
}
-; TRIPLETS: Add IntegerTy Variable Variable
-; TRIPLETS-NEXT: Ret VoidTy Variable
-; TRIPLETS-NEXT: Mul IntegerTy Variable Variable
-; TRIPLETS-NEXT: Ret VoidTy Variable
-; TRIPLETS-NEXT: Alloca PointerTy Constant
-; TRIPLETS-NEXT: Alloca PointerTy Constant
-; TRIPLETS-NEXT: Store VoidTy Variable Pointer
-; TRIPLETS-NEXT: Store VoidTy Variable Pointer
-; TRIPLETS-NEXT: Load IntegerTy Pointer
-; TRIPLETS-NEXT: Load IntegerTy Pointer
-; TRIPLETS-NEXT: Add IntegerTy Variable Variable
-; TRIPLETS-NEXT: Ret VoidTy Variable
+; TRIPLETS: MAX_RELATION=3
+; TRIPLETS-NEXT: 12 79 0
+; TRIPLETS-NEXT: 12 91 2
+; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 0 1
+; TRIPLETS-NEXT: 0 74 0
+; TRIPLETS-NEXT: 0 91 2
+; TRIPLETS-NEXT: 16 79 0
+; TRIPLETS-NEXT: 16 91 2
+; TRIPLETS-NEXT: 16 91 3
+; TRIPLETS-NEXT: 16 0 1
+; TRIPLETS-NEXT: 0 74 0
+; TRIPLETS-NEXT: 0 91 2
+; TRIPLETS-NEXT: 30 81 0
+; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 30 30 1
+; TRIPLETS-NEXT: 30 81 0
+; TRIPLETS-NEXT: 30 90 2
+; TRIPLETS-NEXT: 30 32 1
+; TRIPLETS-NEXT: 32 74 0
+; TRIPLETS-NEXT: 32 91 2
+; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 32 1
+; TRIPLETS-NEXT: 32 74 0
+; TRIPLETS-NEXT: 32 91 2
+; TRIPLETS-NEXT: 32 89 3
+; TRIPLETS-NEXT: 32 31 1
+; TRIPLETS-NEXT: 31 79 0
+; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 31 1
+; TRIPLETS-NEXT: 31 79 0
+; TRIPLETS-NEXT: 31 89 2
+; TRIPLETS-NEXT: 31 12 1
+; TRIPLETS-NEXT: 12 79 0
+; TRIPLETS-NEXT: 12 91 2
+; TRIPLETS-NEXT: 12 91 3
+; TRIPLETS-NEXT: 12 0 1
+; TRIPLETS-NEXT: 0 74 0
+; TRIPLETS-NEXT: 0 91 2
diff --git a/llvm/test/tools/llvm-mc/disassembler-profile.test b/llvm/test/tools/llvm-mc/disassembler-profile.test
new file mode 100644
index 0000000..67afdce
--- /dev/null
+++ b/llvm/test/tools/llvm-mc/disassembler-profile.test
@@ -0,0 +1,12 @@
+# REQUIRES: aarch64-registered-target
+# RUN: rm -rf %t.json
+# RUN: llvm-mc -triple=aarch64 -disassemble -o /dev/null %s -runs=1000 -time-trace -time-trace-file=%t.json
+# RUN: FileCheck --input-file %t.json %s
+
+# Note: Test input taken from llvm/test/MC/Disassembler/AArch64/udf.txt
+
+# CHECK: "name":"Total getInstruction"
+# CHECK: "args":{"count":3,"avg ms":{{.*}}}
+[0x00,0x00,0x00,0x00]
+[0x01,0x02,0x00,0x00]
+[0xff,0xff,0x00,0x00]
diff --git a/llvm/test/tools/llvm-objcopy/COFF/exe-bogus-assoc.test b/llvm/test/tools/llvm-objcopy/COFF/exe-bogus-assoc.test
new file mode 100644
index 0000000..12f14b5
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/COFF/exe-bogus-assoc.test
@@ -0,0 +1,134 @@
+## Test that bogus associative section symbols in executables are ignored.
+##
+## The executable contains two (bogus) associative section symbols, both for
+## (parts of) the .rdata section; one pointing at the .debug_info section
+## (which will be stripped out) and one pointing at a nonexistent section.
+##
+## Check that stripping does succeed, and that it doesn't end up removing
+## the .rdata section.
+
+# RUN: yaml2obj %s -o %t.in.exe
+
+# RUN: llvm-strip --strip-debug %t.in.exe -o %t.out.exe
+# RUN: llvm-readobj --sections %t.out.exe | FileCheck %s
+
+# CHECK: Name: .rdata
+
+--- !COFF
+OptionalHeader:
+ AddressOfEntryPoint: 4096
+ ImageBase: 5368709120
+ SectionAlignment: 4096
+ FileAlignment: 512
+ MajorOperatingSystemVersion: 4
+ MinorOperatingSystemVersion: 0
+ MajorImageVersion: 0
+ MinorImageVersion: 0
+ MajorSubsystemVersion: 5
+ MinorSubsystemVersion: 2
+ Subsystem: IMAGE_SUBSYSTEM_WINDOWS_CUI
+ DLLCharacteristics: [ ]
+ SizeOfStackReserve: 2097152
+ SizeOfStackCommit: 4096
+ SizeOfHeapReserve: 1048576
+ SizeOfHeapCommit: 4096
+header:
+ Machine: IMAGE_FILE_MACHINE_AMD64
+ Characteristics: [ ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 4096
+ VirtualSize: 48
+ SectionData: E806000000E802000000C3C3C30F1F00FFFFFFFFFFFFFFFF0000000000000000FFFFFFFFFFFFFFFF0000000000000000
+ SizeOfRawData: 512
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 8192
+ VirtualSize: 4
+ SectionData: '00000000'
+ SizeOfRawData: 512
+ - Name: .debug_info
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 16384
+ VirtualSize: 4
+ SectionData: '00000000'
+ SizeOfRawData: 512
+symbols:
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 11
+ NumberOfRelocations: 2
+ NumberOfLinenumbers: 0
+ CheckSum: 1703692295
+ Number: 1
+ - Name: '.text$func1'
+ Value: 11
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 1
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 40735498
+ Number: 3
+ Selection: IMAGE_COMDAT_SELECT_ANY
+ - Name: .rdata
+ Value: 0
+ SectionNumber: 2
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 1
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 3
+ Selection: IMAGE_COMDAT_SELECT_ASSOCIATIVE
+ - Name: '.text$func2'
+ Value: 12
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 1
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 40735498
+ Number: 4
+ Selection: IMAGE_COMDAT_SELECT_ANY
+ - Name: .rdata
+ Value: 1
+ SectionNumber: 2
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 1
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 4
+ Selection: IMAGE_COMDAT_SELECT_ASSOCIATIVE
+ - Name: .debug_info
+ Value: 0
+ SectionNumber: 3
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 4
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+...
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
index f69c0b1..fc530a4 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
index ed679dc..d492076 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
index 14cbfeb..8810ee1 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
index c3ac49e..6943c18 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
new file mode 100755
index 0000000..14cbfeb
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
new file mode 100644
index 0000000..c3ac49e
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
index 1b4db88..4ab8040 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
index e959e76..c6aec8d 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
index 2822f2f..5af6c81 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
index 05deb2e..8958af9 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
index 22c6136..e9ec22c 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
index 364aa1c..3952768 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
index 34db7e7..e50f663 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
index 7a7d3a6..df6fcb1 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
index f7d1723..63eea44 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
index 0920028..b6a733a 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
index 3d30a62..ce534db 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
@@ -7,7 +7,7 @@ We expect 5 MIBs, each with different AccessHistogramValues.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 5
CHECK-NEXT: NumAllocFunctions: 3
@@ -241,4 +241,4 @@ CHECK-NEXT: MinLifetimeAccessDensity: 56000
CHECK-NEXT: MaxLifetimeAccessDensity: 56000
CHECK-NEXT: AccessHistogramSize: 8
CHECK-NEXT: AccessHistogram: {{[0-9]+}}
-CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21
\ No newline at end of file
+CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic.test b/llvm/test/tools/llvm-profdata/memprof-basic.test
index e15df50..81550eb 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic.test
@@ -8,7 +8,7 @@ additional allocations which do not originate from the main binary are pruned.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -96,4 +96,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0
\ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic_v4.test b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
new file mode 100644
index 0000000..79d4fe2
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
@@ -0,0 +1,102 @@
+REQUIRES: x86_64-linux
+
+This is a copy of memprof-basic.test with slight changes to check that we can still read v4 of memprofraw.
+
+Inputs cannot and should not be updated.
+
+RUN: llvm-profdata show --memory %p/Inputs/basic_v4.memprofraw --profiled-binary %p/Inputs/basic_v4.memprofexe -o - | FileCheck %s
+
+We expect 2 MIB entries, 1 each for the malloc calls in the program. Any
+additional allocations which do not originate from the main binary are pruned.
+
+CHECK: MemprofProfile:
+CHECK-NEXT: Summary:
+CHECK-NEXT: Version: 4
+CHECK-NEXT: NumSegments: {{[0-9]+}}
+CHECK-NEXT: NumMibInfo: 2
+CHECK-NEXT: NumAllocFunctions: 1
+CHECK-NEXT: NumStackOffsets: 2
+CHECK-NEXT: Segments:
+CHECK-NEXT: -
+CHECK-NEXT: BuildId: {{[[:xdigit:]]+}}
+CHECK-NEXT: Start: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: End: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: -
+
+CHECK: Records:
+CHECK-NEXT: -
+CHECK-NEXT: FunctionGUID: {{[0-9]+}}
+CHECK-NEXT: AllocSites:
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 1
+CHECK-NEXT: Column: 21
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 4
+CHECK-NEXT: Column: 15
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-inline.test b/llvm/test/tools/llvm-profdata/memprof-inline.test
index 79ce2ad..4a3f620 100644
--- a/llvm/test/tools/llvm-profdata/memprof-inline.test
+++ b/llvm/test/tools/llvm-profdata/memprof-inline.test
@@ -5,7 +5,7 @@ RUN: llvm-profdata show --memory %p/Inputs/inline.memprofraw --profiled-binary %
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 2
diff --git a/llvm/test/tools/llvm-profdata/memprof-multi.test b/llvm/test/tools/llvm-profdata/memprof-multi.test
index 6243982..35f94df 100644
--- a/llvm/test/tools/llvm-profdata/memprof-multi.test
+++ b/llvm/test/tools/llvm-profdata/memprof-multi.test
@@ -7,7 +7,7 @@ We expect 2 MIB entries, 1 each for the malloc calls in the program.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
diff --git a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
index 4ba58e3..2d0346e 100644
--- a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
@@ -7,7 +7,7 @@ We expect 2 different MIBs with histogram values. This test is to make sure we p
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -21,79 +21,79 @@ CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
CHECK-NEXT: -
CHECK: Records:
-CHEC-NEXT FunctionGUID: {{[0-9]+}}
-CHEC-NEXT AllocSites:
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 3
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 5
-CHEC-NEXT MinAccessCount: 5
-CHEC-NEXT MaxAccessCount: 5
-CHEC-NEXT TotalSize: 24
-CHEC-NEXT MinSize: 24
-CHEC-NEXT MaxSize: 24
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 20
-CHEC-NEXT MinAccessDensity: 20
-CHEC-NEXT MaxAccessDensity: 20
-CHEC-NEXT TotalLifetimeAccessDensity: 20000
-CHEC-NEXT MinLifetimeAccessDensity: 20000
-CHEC-NEXT MaxLifetimeAccessDensity: 20000
-CHEC-NEXT AccessHistogramSize: 3
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -1 -2
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 10
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 4
-CHEC-NEXT MinAccessCount: 4
-CHEC-NEXT MaxAccessCount: 4
-CHEC-NEXT TotalSize: 48
-CHEC-NEXT MinSize: 48
-CHEC-NEXT MaxSize: 48
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 8
-CHEC-NEXT MinAccessDensity: 8
-CHEC-NEXT MaxAccessDensity: 8
-CHEC-NEXT TotalLifetimeAccessDensity: 8000
-CHEC-NEXT MinLifetimeAccessDensity: 8000
-CHEC-NEXT MaxLifetimeAccessDensity: 8000
-CHEC-NEXT AccessHistogramSize: 6
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1
\ No newline at end of file
+CHECK-NEXT FunctionGUID: {{[0-9]+}}
+CHECK-NEXT AllocSites:
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 3
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 5
+CHECK-NEXT MinAccessCount: 5
+CHECK-NEXT MaxAccessCount: 5
+CHECK-NEXT TotalSize: 24
+CHECK-NEXT MinSize: 24
+CHECK-NEXT MaxSize: 24
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 20
+CHECK-NEXT MinAccessDensity: 20
+CHECK-NEXT MaxAccessDensity: 20
+CHECK-NEXT TotalLifetimeAccessDensity: 20000
+CHECK-NEXT MinLifetimeAccessDensity: 20000
+CHECK-NEXT MaxLifetimeAccessDensity: 20000
+CHECK-NEXT AccessHistogramSize: 3
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -1 -2
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 10
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 4
+CHECK-NEXT MinAccessCount: 4
+CHECK-NEXT MaxAccessCount: 4
+CHECK-NEXT TotalSize: 48
+CHECK-NEXT MinSize: 48
+CHECK-NEXT MaxSize: 48
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 8
+CHECK-NEXT MinAccessDensity: 8
+CHECK-NEXT MaxAccessDensity: 8
+CHECK-NEXT TotalLifetimeAccessDensity: 8000
+CHECK-NEXT MinLifetimeAccessDensity: 8000
+CHECK-NEXT MaxLifetimeAccessDensity: 8000
+CHECK-NEXT AccessHistogramSize: 6
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1
diff --git a/llvm/test/tools/llvm-profdata/memprof-pic.test b/llvm/test/tools/llvm-profdata/memprof-pic.test
index 78d2c5c..66203ef 100644
--- a/llvm/test/tools/llvm-profdata/memprof-pic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-pic.test
@@ -11,7 +11,7 @@ RUN: llvm-profdata show --memory %p/Inputs/pic.memprofraw --profiled-binary %p/I
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -100,4 +100,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0
\ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-rc/windres-preproc.test b/llvm/test/tools/llvm-rc/windres-preproc.test
index 423ad02..43ed0a6 100644
--- a/llvm/test/tools/llvm-rc/windres-preproc.test
+++ b/llvm/test/tools/llvm-rc/windres-preproc.test
@@ -4,7 +4,7 @@
; RUN: llvm-windres -### --include-dir %p/incdir1 --include %p/incdir2 "-DFOO1=\\\"foo bar\\\"" -UFOO2 -D FOO3 --preprocessor-arg "-DFOO4=\\\"baz baz\\\"" -DFOO5=\"bar\" %p/Inputs/empty.rc %t.res | FileCheck %s --check-prefix=CHECK1
; RUN: llvm-windres -### --include-dir %p/incdir1 --include %p/incdir2 "-DFOO1=\"foo bar\"" -UFOO2 -D FOO3 --preprocessor-arg "-DFOO4=\"baz baz\"" "-DFOO5=bar" %p/Inputs/empty.rc %t.res --use-temp-file | FileCheck %s --check-prefix=CHECK1
-; CHECK1: {{^}} "clang" "--driver-mode=gcc" "-target" "{{.*}}-{{.*}}{{mingw32|windows-gnu}}" "-E" "-xc" "-DRC_INVOKED" "-I" "{{.*}}incdir1" "-I" "{{.*}}incdir2" "-D" "FOO1=\"foo bar\"" "-U" "FOO2" "-D" "FOO3" "-DFOO4=\"baz baz\"" "-D" "FOO5=bar" "{{.*}}empty.rc" "-o" "{{.*}}preproc-{{.*}}.rc"{{$}}
+; CHECK1: {{^}} "clang" "--driver-mode=gcc" "-target" "{{.*}}-{{.*}}{{mingw32|cygwin|windows-gnu|windows-cygnus}}" "-E" "-xc" "-DRC_INVOKED" "-I" "{{.*}}incdir1" "-I" "{{.*}}incdir2" "-D" "FOO1=\"foo bar\"" "-U" "FOO2" "-D" "FOO3" "-DFOO4=\"baz baz\"" "-D" "FOO5=bar" "{{.*}}empty.rc" "-o" "{{.*}}preproc-{{.*}}.rc"{{$}}
; RUN: llvm-windres -### --preprocessor "i686-w64-mingw32-gcc" --preprocessor-arg -E "-DFOO=\\\"foo bar\\\"" %p/Inputs/empty.rc %t.res | FileCheck %s --check-prefix=CHECK2
; CHECK2: {{^}} "{{.*}}i686-w64-mingw32-gcc" "-E" "-D" "FOO=\"foo bar\"" "{{.*}}empty.rc" "-o" "{{.*}}preproc-{{.*}}.rc"{{$}}
@@ -13,7 +13,7 @@
; RUN: rm -rf %t-bin/testbin
; RUN: mkdir -p %t-bin/testbin
; RUN: ln -s llvm-windres %t-bin/testbin/i686-w64-mingw32-gcc
-; RUN: env PATH=%t-bin/testbin llvm-windres -### --preprocessor i686-w64-mingw32-gcc --preprocessor-arg -E --preprocessor-arg -xc -DRC_INVOKED %p/Inputs/empty.rc %t.res | FileCheck %s --check-prefix=CHECK3
+; RUN: env PATH="%t-bin/testbin:$PATH" llvm-windres -### --preprocessor i686-w64-mingw32-gcc --preprocessor-arg -E --preprocessor-arg -xc -DRC_INVOKED %p/Inputs/empty.rc %t.res | FileCheck %s --check-prefix=CHECK3
; CHECK3: {{^}} "{{.*}}/testbin/i686-w64-mingw32-gcc" "-E" "-xc" "-D" "RC_INVOKED" "{{.*}}empty.rc" "-o" "{{.*}}preproc-{{.*}}.rc"{{$}}
diff --git a/llvm/test/tools/llvm-readobj/COFF/Inputs/has-cet.exe b/llvm/test/tools/llvm-readobj/COFF/Inputs/has-cet.exe
deleted file mode 100644
index c77060d..0000000
--- a/llvm/test/tools/llvm-readobj/COFF/Inputs/has-cet.exe
+++ /dev/null
Binary files differ
diff --git a/llvm/test/tools/llvm-readobj/COFF/Inputs/has-exdllcharacteristics.exe b/llvm/test/tools/llvm-readobj/COFF/Inputs/has-exdllcharacteristics.exe
new file mode 100644
index 0000000..9c36817
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/COFF/Inputs/has-exdllcharacteristics.exe
Binary files differ
diff --git a/llvm/test/tools/llvm-readobj/COFF/cetcompat.test b/llvm/test/tools/llvm-readobj/COFF/cetcompat.test
deleted file mode 100644
index a973b5c..0000000
--- a/llvm/test/tools/llvm-readobj/COFF/cetcompat.test
+++ /dev/null
@@ -1,16 +0,0 @@
-# To regenerate has-cet.exe
-# $ echo int main() { return 0; } > has-cet.c
-# $ cl has-cet.c /link /cetcompat
-RUN: llvm-readobj --coff-debug-directory %p/Inputs/has-cet.exe | FileCheck %s
-
-CHECK: DebugEntry {
-CHECK: Characteristics: 0x0
-CHECK: Type: ExtendedDLLCharacteristics (0x14)
-CHECK: ExtendedCharacteristics [ (0x1)
-CHECK: IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT (0x1)
-CHECK: ]
-CHECK: RawData (
-CHECK: 0000: 01000000 |....|
-CHECK: )
-CHECK: }
-
diff --git a/llvm/test/tools/llvm-readobj/COFF/exdllcharacteristics.test b/llvm/test/tools/llvm-readobj/COFF/exdllcharacteristics.test
new file mode 100644
index 0000000..ef35aea
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/COFF/exdllcharacteristics.test
@@ -0,0 +1,22 @@
+# To regenerate has-exdllcharacteristics.exe
+# $ echo int main() { return 0; } > has-exdllcharacteristics.c
+# To make the minimum possible stub file (to decrease the binary size)
+# $ echo -n '4D5A00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' | xxd -r -p > stub
+# $ cl has-exdllcharacteristics.c /link /entry:main /STUB:stub /NOCOFFGRPINFO /EMITTOOLVERSIONINFO:NO /EMITPOGOPHASEINFO /NOVCFEATURE /MANIFEST:NO /cetcompat /cetcompatstrict /cetdynamicapisinproc /cetipvalidationrelaxed /hotpatchcompatible /functionpadmin:6
+RUN: llvm-readobj --coff-debug-directory %p/Inputs/has-exdllcharacteristics.exe | FileCheck %s
+
+CHECK: DebugEntry {
+CHECK: Characteristics: 0x0
+CHECK: Type: ExtendedDLLCharacteristics (0x14)
+CHECK: ExtendedCharacteristics [ (0x8F)
+CHECK-DAG: IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT (0x1)
+CHECK-DAG: IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT_STRICT_MODE (0x2)
+CHECK-DAG: IMAGE_DLL_CHARACTERISTICS_EX_CET_SET_CONTEXT_IP_VALIDATION_RELAXED_MODE (0x4)
+CHECK-DAG: IMAGE_DLL_CHARACTERISTICS_EX_CET_DYNAMIC_APIS_ALLOW_IN_PROC_ONLY (0x8)
+CHECK-DAG: IMAGE_DLL_CHARACTERISTICS_EX_HOTPATCH_COMPATIBLE (0x80)
+CHECK: ]
+CHECK: RawData (
+CHECK: 0000: 8F000000 |....|
+CHECK: )
+CHECK: }
+
diff --git a/llvm/test/tools/llvm-readobj/ELF/sframe-fde.test b/llvm/test/tools/llvm-readobj/ELF/sframe-fde.test
new file mode 100644
index 0000000..dee4018
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/sframe-fde.test
@@ -0,0 +1,237 @@
+## Check parsing and dumping of SFrame Function Descriptor Entries.
+# RUN: yaml2obj --docnum=1 %s -o %t.1
+# RUN: llvm-readobj --sframe=.sframe_short --sframe=.sframe_section_relative \
+# RUN: --sframe=.sframe_fde_relative %t.1 2>&1 | \
+# RUN: FileCheck %s --strict-whitespace --match-full-lines \
+# RUN: -DFILE=%t.1 --check-prefix=CASE1
+
+## Check big-endian support.
+# RUN: yaml2obj --docnum=2 %s -o %t.2
+# RUN: llvm-readobj --sframe %t.2 2>&1 | \
+# RUN: FileCheck %s --strict-whitespace --match-full-lines \
+# RUN: -DFILE=%t.2 --check-prefix=CASE2
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+Sections:
+ - Name: .sframe_short
+ Type: SHT_GNU_SFRAME
+ Flags: [ SHF_ALLOC ]
+ ContentArray: [
+ 0xe2, 0xde, 0x02, 0x00, # Preamble (magic, version, flags)
+ # Header:
+ 0x03, 0x42, 0x47, 0x04, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x01, 0x00, 0x00, 0x00, # Number of FDEs
+ 0x10, 0x00, 0x00, 0x00, # Number of FREs
+ 0x00, 0x10, 0x00, 0x00, # FRE length
+ 0x00, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x01, 0x00, 0x00, # FRE offset
+ 0xde, 0xad, 0xbe, 0xef, # AUX header
+ 0x01, 0x02, 0x03, 0x04, # Short FDE
+ ]
+# CASE1-LABEL:SFrame section '.sframe_short' {
+# CASE1: Header {
+# CASE1-NEXT: Magic: 0xDEE2
+# CASE1-NEXT: Version: V2 (0x2)
+# CASE1-NEXT: Flags [ (0x0)
+# CASE1-NEXT: ]
+# CASE1-NEXT: ABI: AMD64EndianLittle (0x3)
+# CASE1-NEXT: CFA fixed FP offset (unused): 66
+# CASE1-NEXT: CFA fixed RA offset: 71
+# CASE1-NEXT: Auxiliary header length: 4
+# CASE1-NEXT: Num FDEs: 1
+# CASE1-NEXT: Num FREs: 16
+# CASE1-NEXT: FRE subsection length: 4096
+# CASE1-NEXT: FDE subsection offset: 0
+# CASE1-NEXT: FRE subsection offset: 256
+# CASE1-NEXT: Auxiliary header: [0xDE, 0xAD, 0xBE, 0xEF]
+# CASE1-NEXT: }
+# CASE1-NEXT:{{.*}}: warning: '[[FILE]]': unexpected end of data at offset 0x24 while reading [0x20, 0x34)
+# CASE1-NEXT:}
+
+ - Name: .sframe_section_relative
+ Type: SHT_GNU_SFRAME
+ Flags: [ SHF_ALLOC ]
+ ContentArray: [
+ 0xe2, 0xde, 0x02, 0x00, # Preamble (magic, version, flags)
+ # Header:
+ 0x03, 0x42, 0x47, 0x00, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x01, 0x00, 0x00, 0x00, # Number of FDEs
+ 0x10, 0x00, 0x00, 0x00, # Number of FREs
+ 0x00, 0x10, 0x00, 0x00, # FRE length
+ 0x04, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x01, 0x00, 0x00, # FRE offset
+
+ 0xff, 0xff, 0xff, 0xff, # Unused data skipped due to the FDE offset field
+
+ # FDE:
+ 0x00, 0xde, 0xad, 0x00, # Start Address
+ 0xbe, 0x01, 0x00, 0x00, # Size
+ 0x10, 0x00, 0x00, 0x00, # Start FRE Offset
+ 0x00, 0x00, 0x00, 0x00, # Number of FREs
+ 0x31, 0xde, 0xad, 0x00, # Info, RepSize, Padding2
+ ]
+## Also testing:
+## - dead space between the header and the FDE subsection.
+## - PCMask FDE types
+## - unused PAuth key handling
+# CASE1-LABEL:SFrame section '.sframe_section_relative' {
+# CASE1: Header {
+# CASE1-NEXT: Magic: 0xDEE2
+# CASE1-NEXT: Version: V2 (0x2)
+# CASE1-NEXT: Flags [ (0x0)
+# CASE1-NEXT: ]
+# CASE1-NEXT: ABI: AMD64EndianLittle (0x3)
+# CASE1-NEXT: CFA fixed FP offset (unused): 66
+# CASE1-NEXT: CFA fixed RA offset: 71
+# CASE1-NEXT: Auxiliary header length: 0
+# CASE1-NEXT: Num FDEs: 1
+# CASE1-NEXT: Num FREs: 16
+# CASE1-NEXT: FRE subsection length: 4096
+# CASE1-NEXT: FDE subsection offset: 4
+# CASE1-NEXT: FRE subsection offset: 256
+# CASE1-NEXT: Auxiliary header: []
+# CASE1-NEXT: }
+# CASE1-NEXT: Function Index [
+# CASE1-NEXT: FuncDescEntry [0] {
+# CASE1-NEXT: PC: 0xADDE24
+# CASE1-NEXT: Size: 0x1BE
+# CASE1-NEXT: Start FRE Offset: 0x10
+# CASE1-NEXT: Num FREs: 0
+# CASE1-NEXT: Info {
+# CASE1-NEXT: FRE Type: Addr2 (0x1)
+# CASE1-NEXT: FDE Type: PCMask (0x1)
+# CASE1-NEXT: Raw: 0x31
+# CASE1-NEXT: }
+# CASE1-NEXT: Repetitive block size: 0xDE
+# CASE1-NEXT: Padding2: 0xAD
+# CASE1-NEXT: }
+# CASE1-NEXT: ]
+# CASE1-NEXT:}
+
+ - Name: .sframe_fde_relative
+ Type: SHT_GNU_SFRAME
+ Flags: [ SHF_ALLOC ]
+ ContentArray: [
+ 0xe2, 0xde, 0x02, 0x04, # Preamble (magic, version, flags)
+ # Header:
+ 0x02, 0x42, 0x47, 0x00, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x01, 0x00, 0x00, 0x00, # Number of FDEs
+ 0x10, 0x00, 0x00, 0x00, # Number of FREs
+ 0x00, 0x10, 0x00, 0x00, # FRE length
+ 0x04, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x01, 0x00, 0x00, # FRE offset
+
+ 0xff, 0xff, 0xff, 0xff, # Unused data skipped due to the FDE offset field
+
+ # FDE:
+ 0x00, 0xde, 0xad, 0x00, # Start Address
+ 0xbe, 0x01, 0x00, 0x00, # Size
+ 0x10, 0x00, 0x00, 0x00, # Start FRE Offset
+ 0x00, 0x00, 0x00, 0x00, # Number of FREs
+ 0x02, 0xde, 0xad, 0x00, # Info, RepSize, Padding2
+ ]
+## Also testing:
+## - PCInc FDE type
+## - AArch64 PAuth key handling
+# CASE1-LABEL:SFrame section '.sframe_fde_relative' {
+# CASE1: Header {
+# CASE1-NEXT: Magic: 0xDEE2
+# CASE1-NEXT: Version: V2 (0x2)
+# CASE1-NEXT: Flags [ (0x4)
+# CASE1-NEXT: FDEFuncStartPCRel (0x4){{ *}}
+# CASE1-NEXT: ]
+# CASE1-NEXT: ABI: AArch64EndianLittle (0x2)
+# CASE1-NEXT: CFA fixed FP offset (unused): 66
+# CASE1-NEXT: CFA fixed RA offset (unused): 71
+# CASE1-NEXT: Auxiliary header length: 0
+# CASE1-NEXT: Num FDEs: 1
+# CASE1-NEXT: Num FREs: 16
+# CASE1-NEXT: FRE subsection length: 4096
+# CASE1-NEXT: FDE subsection offset: 4
+# CASE1-NEXT: FRE subsection offset: 256
+# CASE1-NEXT: Auxiliary header: []
+# CASE1-NEXT: }
+# CASE1-NEXT: Function Index [
+# CASE1-NEXT: FuncDescEntry [0] {
+# CASE1-NEXT: PC: 0xADDE78
+# CASE1-NEXT: Size: 0x1BE
+# CASE1-NEXT: Start FRE Offset: 0x10
+# CASE1-NEXT: Num FREs: 0
+# CASE1-NEXT: Info {
+# CASE1-NEXT: FRE Type: Addr4 (0x2)
+# CASE1-NEXT: FDE Type: PCInc (0x0)
+# CASE1-NEXT: PAuth Key: A (0x0)
+# CASE1-NEXT: Raw: 0x2
+# CASE1-NEXT: }
+# CASE1-NEXT: Repetitive block size (unused): 0xDE
+# CASE1-NEXT: Padding2: 0xAD
+# CASE1-NEXT: }
+# CASE1-NEXT: ]
+# CASE1-NEXT:}
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2MSB
+ Type: ET_EXEC
+Sections:
+ - Name: .sframe
+ Type: SHT_GNU_SFRAME
+ Flags: [ SHF_ALLOC ]
+ ContentArray: [
+ 0xde, 0xe2, 0x02, 0x05, # Preamble (magic, version, flags)
+ # Header:
+ 0x01, 0x42, 0x47, 0x00, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x00, 0x00, 0x00, 0x01, # Number of FDEs
+ 0x00, 0x00, 0x00, 0x10, # Number of FREs
+ 0x00, 0x00, 0x10, 0x00, # FRE length
+ 0x00, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x00, 0x01, 0x00, # FRE offset
+
+ # FDE:
+ 0x00, 0xde, 0xad, 0x00, # Start Address
+ 0x00, 0x00, 0x01, 0xbe, # Size
+ 0x00, 0x00, 0x00, 0x10, # Start FRE Offset
+ 0x00, 0x00, 0x00, 0x10, # Number of FREs
+ 0x02, 0xde, 0xad, 0x00, # Info, RepSize, Padding2
+ ]
+# CASE2-LABEL:SFrame section '.sframe' {
+# CASE2: Header {
+# CASE2-NEXT: Magic: 0xDEE2
+# CASE2-NEXT: Version: V2 (0x2)
+# CASE2-NEXT: Flags [ (0x5)
+# CASE2-NEXT: FDEFuncStartPCRel (0x4){{ *}}
+# CASE2-NEXT: FDESorted (0x1){{ *}}
+# CASE2-NEXT: ]
+# CASE2-NEXT: ABI: AArch64EndianBig (0x1)
+# CASE2-NEXT: CFA fixed FP offset (unused): 66
+# CASE2-NEXT: CFA fixed RA offset (unused): 71
+# CASE2-NEXT: Auxiliary header length: 0
+# CASE2-NEXT: Num FDEs: 1
+# CASE2-NEXT: Num FREs: 16
+# CASE2-NEXT: FRE subsection length: 4096
+# CASE2-NEXT: FDE subsection offset: 0
+# CASE2-NEXT: FRE subsection offset: 256
+# CASE2-NEXT: Auxiliary header: []
+# CASE2-NEXT: }
+# CASE2-NEXT: Function Index [
+# CASE2-NEXT: FuncDescEntry [0] {
+# CASE2-NEXT: PC: 0xDEAD1C
+# CASE2-NEXT: Size: 0x1BE
+# CASE2-NEXT: Start FRE Offset: 0x10
+# CASE2-NEXT: Num FREs: 16
+# CASE2-NEXT: Info {
+# CASE2-NEXT: FRE Type: Addr4 (0x2)
+# CASE2-NEXT: FDE Type: PCInc (0x0)
+# CASE2-NEXT: PAuth Key: A (0x0)
+# CASE2-NEXT: Raw: 0x2
+# CASE2-NEXT: }
+# CASE2-NEXT: Repetitive block size (unused): 0xDE
+# CASE2-NEXT: Padding2: 0xAD00
+# CASE2-NEXT: }
+# CASE2-NEXT: ]
+# CASE2-NEXT:}
diff --git a/llvm/test/tools/llvm-readobj/ELF/sframe-header.test b/llvm/test/tools/llvm-readobj/ELF/sframe-header.test
index f827296..e7c0db0 100644
--- a/llvm/test/tools/llvm-readobj/ELF/sframe-header.test
+++ b/llvm/test/tools/llvm-readobj/ELF/sframe-header.test
@@ -2,7 +2,8 @@
# RUN: yaml2obj --docnum=1 %s -o %t.1
# RUN: llvm-readobj --sframe=.sframe_bad_sh_size --sframe=.sframe_1b \
# RUN: --sframe=.sframe_bad_magic --sframe=.sframe_bad_version \
-# RUN: --sframe=.sframe_6b --sframe=.sframe_header %t.1 2>&1 | \
+# RUN: --sframe=.sframe_6b --sframe=.sframe_short_auxheader \
+# RUN: --sframe=.sframe_header %t.1 2>&1 | \
# RUN: FileCheck %s --strict-whitespace --match-full-lines \
# RUN: -DFILE=%t.1 --check-prefix=CASE1
@@ -60,24 +61,24 @@ Sections:
0xe2, 0xde, 0x02, 0x00, # Preamble (magic, version, flags)
0x01, 0x02
]
-
# CASE1-LABEL:SFrame section '.sframe_6b' {
# CASE1:{{.*}}: warning: '[[FILE]]': invalid sframe section: unexpected end of data at offset 0x6 while reading [0x0, 0x1c)
- - Name: .sframe_header
+ - Name: .sframe_short_auxheader
Type: SHT_GNU_SFRAME
Flags: [ SHF_ALLOC ]
ContentArray: [
0xe2, 0xde, 0x02, 0x06, # Preamble (magic, version, flags)
# Header:
- 0x03, 0x42, 0x47, 0x00, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x03, 0x42, 0x47, 0x08, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
0x01, 0x00, 0x00, 0x00, # Number of FDEs
0x10, 0x00, 0x00, 0x00, # Number of FREs
0x00, 0x10, 0x00, 0x00, # FRE length
- 0x04, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x00, 0x00, 0x00, # FDE offset
0x00, 0x01, 0x00, 0x00, # FRE offset
+ 0xde, 0xad, 0xbe, 0xef, # AUX header
]
-# CASE1-LABEL:SFrame section '.sframe_header' {
+# CASE1-LABEL:SFrame section '.sframe_short_auxheader' {
# CASE1: Header {
# CASE1-NEXT: Magic: 0xDEE2
# CASE1-NEXT: Version: V2 (0x2)
@@ -88,13 +89,52 @@ Sections:
# CASE1-NEXT: ABI: AMD64EndianLittle (0x3)
# CASE1-NEXT: CFA fixed FP offset (unused): 66
# CASE1-NEXT: CFA fixed RA offset: 71
-# CASE1-NEXT: Auxiliary header length: 0
+# CASE1-NEXT: Auxiliary header length: 8
# CASE1-NEXT: Num FDEs: 1
# CASE1-NEXT: Num FREs: 16
# CASE1-NEXT: FRE subsection length: 4096
-# CASE1-NEXT: FDE subsection offset: 4
+# CASE1-NEXT: FDE subsection offset: 0
+# CASE1-NEXT: FRE subsection offset: 256
+# CASE1-NEXT:{{.*}}: warning: '[[FILE]]': unexpected end of data at offset 0x20 while reading [0x1c, 0x24)
+# CASE1-NEXT: }
+# CASE1-NEXT:{{.*}}: warning: '[[FILE]]': unexpected end of data at offset 0x20 while reading [0x24, 0x38)
+# CASE1-NEXT:}
+
+ - Name: .sframe_header
+ Type: SHT_GNU_SFRAME
+ Flags: [ SHF_ALLOC ]
+ ContentArray: [
+ 0xe2, 0xde, 0x02, 0x06, # Preamble (magic, version, flags)
+ # Header:
+ 0x03, 0x42, 0x47, 0x04, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
+ 0x00, 0x00, 0x00, 0x00, # Number of FDEs
+ 0x10, 0x00, 0x00, 0x00, # Number of FREs
+ 0x00, 0x10, 0x00, 0x00, # FRE length
+ 0x00, 0x00, 0x00, 0x00, # FDE offset
+ 0x00, 0x01, 0x00, 0x00, # FRE offset
+ 0xde, 0xad, 0xbe, 0xef, # AUX header
+ ]
+# CASE1-LABEL:SFrame section '.sframe_header' {
+# CASE1: Header {
+# CASE1-NEXT: Magic: 0xDEE2
+# CASE1-NEXT: Version: V2 (0x2)
+# CASE1-NEXT: Flags [ (0x6)
+# CASE1-NEXT: FDEFuncStartPCRel (0x4){{ *}}
+# CASE1-NEXT: FramePointer (0x2){{ *}}
+# CASE1-NEXT: ]
+# CASE1-NEXT: ABI: AMD64EndianLittle (0x3)
+# CASE1-NEXT: CFA fixed FP offset (unused): 66
+# CASE1-NEXT: CFA fixed RA offset: 71
+# CASE1-NEXT: Auxiliary header length: 4
+# CASE1-NEXT: Num FDEs: 0
+# CASE1-NEXT: Num FREs: 16
+# CASE1-NEXT: FRE subsection length: 4096
+# CASE1-NEXT: FDE subsection offset: 0
# CASE1-NEXT: FRE subsection offset: 256
+# CASE1-NEXT: Auxiliary header: [0xDE, 0xAD, 0xBE, 0xEF]
# CASE1-NEXT: }
+# CASE1-NEXT: Function Index [
+# CASE1-NEXT: ]
# CASE1-NEXT:}
--- !ELF
@@ -110,10 +150,10 @@ Sections:
0xde, 0xe2, 0x02, 0x01, # Preamble (magic, version, flags)
# Header:
0x01, 0x42, 0x47, 0x00, # ABI, Fixed FP offset, Fixed RA Offset, AUX header length
- 0x00, 0x00, 0x00, 0x01, # Number of FDEs
+ 0x00, 0x00, 0x00, 0x00, # Number of FDEs
0x00, 0x00, 0x00, 0x10, # Number of FREs
0x00, 0x00, 0x10, 0x00, # FRE length
- 0x00, 0x00, 0x00, 0x04, # FDE offset
+ 0x00, 0x00, 0x00, 0x00, # FDE offset
0x00, 0x00, 0x01, 0x00, # FRE offset
]
# CASE2-LABEL:SFrame section '.sframe' {
@@ -127,12 +167,15 @@ Sections:
# CASE2-NEXT: CFA fixed FP offset (unused): 66
# CASE2-NEXT: CFA fixed RA offset (unused): 71
# CASE2-NEXT: Auxiliary header length: 0
-# CASE2-NEXT: Num FDEs: 1
+# CASE2-NEXT: Num FDEs: 0
# CASE2-NEXT: Num FREs: 16
# CASE2-NEXT: FRE subsection length: 4096
-# CASE2-NEXT: FDE subsection offset: 4
+# CASE2-NEXT: FDE subsection offset: 0
# CASE2-NEXT: FRE subsection offset: 256
+# CASE2-NEXT: Auxiliary header: []
# CASE2-NEXT: }
+# CASE2-NEXT: Function Index [
+# CASE2-NEXT: ]
# CASE2-NEXT:}
--- !ELF
diff --git a/llvm/test/tools/obj2yaml/ELF/eflags.yaml b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
new file mode 100644
index 0000000..da16a62
--- /dev/null
+++ b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
@@ -0,0 +1,31 @@
+## Check how obj2yaml dumps the e_flags field.
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2MSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [ [[FLAGS]] ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_32PLUS " %s -o %t2
+# RUN: obj2yaml %t2 | FileCheck %s --check-prefix=FLAG
+
+# FLAG: --- !ELF
+# FLAG-NEXT: FileHeader:
+# FLAG-NEXT: Class: ELFCLASS64
+# FLAG-NEXT: Data: ELFDATA2MSB
+# FLAG-NEXT: Type: ET_EXEC
+# FLAG-NEXT: Machine: EM_SPARC32PLUS
+# FLAG-NEXT: Flags: [ EF_SPARC_32PLUS ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_HAL_R1 " %s -o %t3
+# RUN: obj2yaml %t3 | FileCheck %s --check-prefix=FLAG2
+
+# FLAG2: --- !ELF
+# FLAG2-NEXT: FileHeader:
+# FLAG2-NEXT: Class: ELFCLASS64
+# FLAG2-NEXT: Data: ELFDATA2MSB
+# FLAG2-NEXT: Type: ET_EXEC
+# FLAG2-NEXT: Machine: EM_SPARC32PLUS
+# FLAG2-NEXT: Flags: [ EF_SPARC_HAL_R1 ]
diff --git a/llvm/test/tools/yaml2obj/file-header-flags.yaml b/llvm/test/tools/yaml2obj/file-header-flags.yaml
new file mode 100644
index 0000000..baa101a
--- /dev/null
+++ b/llvm/test/tools/yaml2obj/file-header-flags.yaml
@@ -0,0 +1,25 @@
+## Test for FileHeader Flags.
+
+## When FLAGS variable isn't defined, the e_flags value is 0.
+## Otherwise, it's the specified value.
+
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=NO-FLAG
+
+# RUN: yaml2obj %s -o %t -DFLAGS=[EF_SPARC_32PLUS]
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=FLAG
+
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [[FLAGS=<none>]]
+
+# NO-FLAG: Flags [ (0x0)
+# NO-FLAG-NEXT: ]
+
+# FLAG: Flags [ (0x100)
+# FLAG-NEXT: EF_SPARC_32PLUS (0x100)
+# FLAG-NEXT: ]
diff --git a/llvm/tools/llc/llc.cpp b/llvm/tools/llc/llc.cpp
index 93b4a50..b3d7185 100644
--- a/llvm/tools/llc/llc.cpp
+++ b/llvm/tools/llc/llc.cpp
@@ -733,8 +733,8 @@ static int compileModule(char **argv, LLVMContext &Context) {
reportError("target does not support generation of this file type");
}
- const_cast<TargetLoweringObjectFile *>(Target->getObjFileLowering())
- ->Initialize(MMIWP->getMMI().getContext(), *Target);
+ Target->getObjFileLowering()->Initialize(MMIWP->getMMI().getContext(),
+ *Target);
if (MIR) {
assert(MMIWP && "Forgot to create MMIWP?");
if (MIR->parseMachineFunctions(*M, MMIWP->getMMI()))
diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
index 1659cfb..5dae6c0 100644
--- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
+++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
@@ -30,11 +30,12 @@
#include <memory>
#include <string>
#include <vector>
-#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
#include <immintrin.h>
#include <intrin.h>
#endif
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
#include <float.h> // For _clearfp in ~X86SavedState().
#endif
@@ -654,7 +655,7 @@ namespace {
class X86SavedState : public ExegesisTarget::SavedState {
public:
X86SavedState() {
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
_fxsave64(FPState);
Eflags = __readeflags();
#elif defined(__GNUC__) && defined(__x86_64__)
@@ -668,7 +669,7 @@ public:
~X86SavedState() {
 // Restoring the X87 state does not flush pending exceptions; make sure
// these exceptions are flushed now.
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
_clearfp();
_fxrstor64(FPState);
__writeeflags(Eflags);
@@ -682,7 +683,7 @@ public:
}
private:
-#if defined(__x86_64__) || defined(_M_X64)
+#if defined(__x86_64__) || defined(_M_X64) && !defined(_M_ARM64EC)
alignas(16) char FPState[512];
uint64_t Eflags;
#endif
@@ -824,8 +825,9 @@ private:
// For now, only do the check if we see an Intel machine because
 // the counter uses some Intel-specific magic and it could
 // be confused and think an AMD machine actually has LBR support.
-#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
using namespace sys::detail::x86;
if (getVendorSignature() == VendorSignatures::GENUINE_INTEL)
diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
index e1e5fad..f6ed94b 100644
--- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
+++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
@@ -9,13 +9,20 @@
/// \file
/// This file implements the IR2Vec embedding generation tool.
///
-/// This tool provides two main functionalities:
+/// This tool provides three main modes:
///
/// 1. Triplet Generation Mode (--mode=triplets):
-/// Generates triplets (opcode, type, operands) for vocabulary training.
-/// Usage: llvm-ir2vec --mode=triplets input.bc -o triplets.txt
+/// Generates numeric triplets (head, tail, relation) for vocabulary
+/// training. Output format: MAX_RELATION=N header followed by
+/// head\ttail\trelation lines. Relations: 0=Type, 1=Next, 2+=Arg0,Arg1,...
+/// Usage: llvm-ir2vec --mode=triplets input.bc -o train2id.txt
///
-/// 2. Embedding Generation Mode (--mode=embeddings):
+/// 2. Entities Generation Mode (--mode=entities):
+/// Generates entity mappings for vocabulary training.
+/// Output format: <total_entities> header followed by entity\tid lines.
+/// Usage: llvm-ir2vec --mode=entities input.bc -o entity2id.txt
+///
+/// 3. Embedding Generation Mode (--mode=embeddings):
/// Generates IR2Vec embeddings using a trained vocabulary.
/// Usage: llvm-ir2vec --mode=embeddings --ir2vec-vocab-path=vocab.json
/// --level=func input.bc -o embeddings.txt Levels: --level=inst
@@ -60,16 +67,19 @@ static cl::opt<std::string> OutputFilename("o", cl::desc("Output filename"),
enum ToolMode {
TripletMode, // Generate triplets for vocabulary training
+ EntityMode, // Generate entity mappings for vocabulary training
EmbeddingMode // Generate embeddings using trained vocabulary
};
-static cl::opt<ToolMode>
- Mode("mode", cl::desc("Tool operation mode:"),
- cl::values(clEnumValN(TripletMode, "triplets",
- "Generate triplets for vocabulary training"),
- clEnumValN(EmbeddingMode, "embeddings",
- "Generate embeddings using trained vocabulary")),
- cl::init(EmbeddingMode), cl::cat(IR2VecToolCategory));
+static cl::opt<ToolMode> Mode(
+ "mode", cl::desc("Tool operation mode:"),
+ cl::values(clEnumValN(TripletMode, "triplets",
+ "Generate triplets for vocabulary training"),
+ clEnumValN(EntityMode, "entities",
+ "Generate entity mappings for vocabulary training"),
+ clEnumValN(EmbeddingMode, "embeddings",
+ "Generate embeddings using trained vocabulary")),
+ cl::init(EmbeddingMode), cl::cat(IR2VecToolCategory));
static cl::opt<std::string>
FunctionName("function", cl::desc("Process specific function only"),
@@ -94,6 +104,13 @@ static cl::opt<EmbeddingLevel>
namespace {
+/// Relation types for triplet generation
+enum RelationType {
+ TypeRelation = 0, ///< Instruction to type relationship
+ NextRelation = 1, ///< Sequential instruction relationship
+ ArgRelation = 2 ///< Instruction to operand relationship (ArgRelation + N)
+};
+
/// Helper class for collecting IR triplets and generating embeddings
class IR2VecTool {
private:
@@ -111,29 +128,101 @@ public:
// option
MAM.registerPass([&] { return PassInstrumentationAnalysis(); });
MAM.registerPass([&] { return IR2VecVocabAnalysis(); });
+ // This will throw an error if vocab is not found or invalid
Vocab = &MAM.getResult<IR2VecVocabAnalysis>(M);
return Vocab->isValid();
}
- /// Generate triplets for the entire module
+ /// Generate triplets for the module
+ /// Output format: MAX_RELATION=N header followed by relationships
void generateTriplets(raw_ostream &OS) const {
- for (const Function &F : M)
- generateTriplets(F, OS);
+ unsigned MaxRelation = NextRelation; // Track maximum relation ID
+ std::string Relationships;
+ raw_string_ostream RelOS(Relationships);
+
+ for (const Function &F : M) {
+ unsigned FuncMaxRelation = generateTriplets(F, RelOS);
+ MaxRelation = std::max(MaxRelation, FuncMaxRelation);
+ }
+
+ RelOS.flush();
+
+ // Write metadata header followed by relationships
+ OS << "MAX_RELATION=" << MaxRelation << '\n';
+ OS << Relationships;
}
/// Generate triplets for a single function
- void generateTriplets(const Function &F, raw_ostream &OS) const {
+ /// Returns the maximum relation ID used in this function
+ unsigned generateTriplets(const Function &F, raw_ostream &OS) const {
if (F.isDeclaration())
- return;
+ return 0;
+
+ unsigned MaxRelation = 1;
+ unsigned PrevOpcode = 0;
+ bool HasPrevOpcode = false;
+
+ for (const BasicBlock &BB : F) {
+ for (const auto &I : BB.instructionsWithoutDebug()) {
+ unsigned Opcode = Vocabulary::getNumericID(I.getOpcode());
+ unsigned TypeID = Vocabulary::getNumericID(I.getType()->getTypeID());
+
+ // Add "Next" relationship with previous instruction
+ if (HasPrevOpcode) {
+ OS << PrevOpcode << '\t' << Opcode << '\t' << NextRelation << '\n';
+ LLVM_DEBUG(dbgs()
+ << Vocabulary::getVocabKeyForOpcode(PrevOpcode + 1) << '\t'
+ << Vocabulary::getVocabKeyForOpcode(Opcode + 1) << '\t'
+ << "Next\n");
+ }
- std::string LocalOutput;
- raw_string_ostream LocalOS(LocalOutput);
+ // Add "Type" relationship
+ OS << Opcode << '\t' << TypeID << '\t' << TypeRelation << '\n';
+ LLVM_DEBUG(
+ dbgs() << Vocabulary::getVocabKeyForOpcode(Opcode + 1) << '\t'
+ << Vocabulary::getVocabKeyForTypeID(I.getType()->getTypeID())
+ << '\t' << "Type\n");
+
+ // Add "Arg" relationships
+ unsigned ArgIndex = 0;
+ for (const Use &U : I.operands()) {
+ unsigned OperandID = Vocabulary::getNumericID(U.get());
+ unsigned RelationID = ArgRelation + ArgIndex;
+ OS << Opcode << '\t' << OperandID << '\t' << RelationID << '\n';
+
+ LLVM_DEBUG({
+ StringRef OperandStr = Vocabulary::getVocabKeyForOperandKind(
+ Vocabulary::getOperandKind(U.get()));
+ dbgs() << Vocabulary::getVocabKeyForOpcode(Opcode + 1) << '\t'
+ << OperandStr << '\t' << "Arg" << ArgIndex << '\n';
+ });
+
+ ++ArgIndex;
+ }
+ // Only update MaxRelation if there were operands
+ if (ArgIndex > 0) {
+ MaxRelation = std::max(MaxRelation, ArgRelation + ArgIndex - 1);
+ }
+ PrevOpcode = Opcode;
+ HasPrevOpcode = true;
+ }
+ }
- for (const BasicBlock &BB : F)
- traverseBasicBlock(BB, LocalOS);
+ return MaxRelation;
+ }
- LocalOS.flush();
- OS << LocalOutput;
+ /// Dump entity ID to string mappings
+ static void generateEntityMappings(raw_ostream &OS) {
+ // FIXME: Currently, the generated entity mappings are not one-to-one;
+ // multiple TypeIDs map to the same string key (e.g., Half, BFloat, etc. map
+ // to FloatTy). This would hinder learning good seed embeddings.
+ // We should fix this in the future by ensuring unique string keys either by
+ // post-processing here without changing the mapping in ir2vec::Vocabulary,
+ // or by changing the Vocabulary generation logic to ensure unique keys.
+ auto EntityLen = Vocabulary::expectedSize();
+ OS << EntityLen << "\n";
+ for (unsigned EntityID = 0; EntityID < EntityLen; ++EntityID)
+ OS << Vocabulary::getStringKey(EntityID) << '\t' << EntityID << '\n';
}
/// Generate embeddings for the entire module
@@ -197,31 +286,6 @@ public:
}
}
}
-
-private:
- /// Process a single basic block for triplet generation
- void traverseBasicBlock(const BasicBlock &BB, raw_string_ostream &OS) const {
- // Consider only non-debug and non-pseudo instructions
- for (const auto &I : BB.instructionsWithoutDebug()) {
- StringRef OpcStr = Vocabulary::getVocabKeyForOpcode(I.getOpcode());
- StringRef TypeStr =
- Vocabulary::getVocabKeyForTypeID(I.getType()->getTypeID());
-
- OS << '\n' << OpcStr << ' ' << TypeStr << ' ';
-
- LLVM_DEBUG({
- I.print(dbgs());
- dbgs() << "\n";
- I.getType()->print(dbgs());
- dbgs() << " Type\n";
- });
-
- for (const Use &U : I.operands())
- OS << Vocabulary::getVocabKeyForOperandKind(
- Vocabulary::getOperandKind(U.get()))
- << ' ';
- }
- }
};
Error processModule(Module &M, raw_ostream &OS) {
@@ -230,11 +294,9 @@ Error processModule(Module &M, raw_ostream &OS) {
if (Mode == EmbeddingMode) {
// Initialize vocabulary for embedding generation
// Note: Requires --ir2vec-vocab-path option to be set
- if (!Tool.initializeVocabulary())
- return createStringError(
- errc::invalid_argument,
- "Failed to initialize IR2Vec vocabulary. "
- "Make sure to specify --ir2vec-vocab-path for embedding mode.");
+ auto VocabStatus = Tool.initializeVocabulary();
+ assert(VocabStatus && "Failed to initialize IR2Vec vocabulary");
+ (void)VocabStatus;
if (!FunctionName.empty()) {
// Process single function
@@ -249,18 +311,7 @@ Error processModule(Module &M, raw_ostream &OS) {
Tool.generateEmbeddings(OS);
}
} else {
- // Triplet generation mode - no vocabulary needed
- if (!FunctionName.empty())
- // Process single function
- if (const Function *F = M.getFunction(FunctionName))
- Tool.generateTriplets(*F, OS);
- else
- return createStringError(errc::invalid_argument,
- "Function '%s' not found",
- FunctionName.c_str());
- else
- // Process all functions
- Tool.generateTriplets(OS);
+ Tool.generateTriplets(OS);
}
return Error::success();
}
@@ -284,8 +335,25 @@ int main(int argc, char **argv) {
"information.\n");
// Validate command line options
- if (Mode == TripletMode && Level.getNumOccurrences() > 0)
- errs() << "Warning: --level option is ignored in triplet mode\n";
+ if (Mode != EmbeddingMode) {
+ if (Level.getNumOccurrences() > 0)
+ errs() << "Warning: --level option is ignored\n";
+ if (FunctionName.getNumOccurrences() > 0)
+ errs() << "Warning: --function option is ignored\n";
+ }
+
+ std::error_code EC;
+ raw_fd_ostream OS(OutputFilename, EC);
+ if (EC) {
+ errs() << "Error opening output file: " << EC.message() << "\n";
+ return 1;
+ }
+
+ if (Mode == EntityMode) {
+ // Just dump entity mappings without processing any IR
+ IR2VecTool::generateEntityMappings(OS);
+ return 0;
+ }
// Parse the input LLVM IR file or stdin
SMDiagnostic Err;
@@ -296,13 +364,6 @@ int main(int argc, char **argv) {
return 1;
}
- std::error_code EC;
- raw_fd_ostream OS(OutputFilename, EC);
- if (EC) {
- errs() << "Error opening output file: " << EC.message() << "\n";
- return 1;
- }
-
if (Error Err = processModule(*M, OS)) {
handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EIB) {
errs() << "Error: " << EIB.message() << "\n";
diff --git a/llvm/tools/llvm-mc/Disassembler.cpp b/llvm/tools/llvm-mc/Disassembler.cpp
index 8672793..2ee4221 100644
--- a/llvm/tools/llvm-mc/Disassembler.cpp
+++ b/llvm/tools/llvm-mc/Disassembler.cpp
@@ -24,6 +24,7 @@
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
@@ -32,24 +33,29 @@ using namespace llvm;
typedef std::pair<std::vector<unsigned char>, std::vector<const char *>>
ByteArrayTy;
-static bool PrintInsts(const MCDisassembler &DisAsm, const ByteArrayTy &Bytes,
+static MCDisassembler::DecodeStatus getInstruction(const MCDisassembler &DisAsm,
+ const MCSubtargetInfo &STI,
+ MCInst &Inst, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes,
+ uint64_t Address) {
+ if (STI.getTargetTriple().getArch() == Triple::hexagon)
+ return DisAsm.getInstructionBundle(Inst, Size, Bytes, Address, nulls());
+ return DisAsm.getInstruction(Inst, Size, Bytes, Address, nulls());
+}
+
+static bool printInsts(const MCDisassembler &DisAsm, const ByteArrayTy &Bytes,
SourceMgr &SM, MCStreamer &Streamer, bool InAtomicBlock,
- const MCSubtargetInfo &STI) {
+ const MCSubtargetInfo &STI, unsigned NumBenchmarkRuns) {
ArrayRef<uint8_t> Data(Bytes.first);
// Disassemble it to strings.
uint64_t Size;
- uint64_t Index;
- for (Index = 0; Index < Bytes.first.size(); Index += Size) {
- MCInst Inst;
+ for (uint64_t Index = 0; Index < Bytes.first.size(); Index += Size) {
- MCDisassembler::DecodeStatus S;
- if (STI.getTargetTriple().getArch() == Triple::hexagon)
- S = DisAsm.getInstructionBundle(Inst, Size, Data.slice(Index), Index,
- nulls());
- else
- S = DisAsm.getInstruction(Inst, Size, Data.slice(Index), Index, nulls());
+ MCInst Inst;
+ MCDisassembler::DecodeStatus S =
+ getInstruction(DisAsm, STI, Inst, Size, Data.slice(Index), Index);
switch (S) {
case MCDisassembler::Fail:
SM.PrintMessage(SMLoc::getFromPointer(Bytes.second[Index]),
@@ -74,6 +80,18 @@ static bool PrintInsts(const MCDisassembler &DisAsm, const ByteArrayTy &Bytes,
Streamer.emitInstruction(Inst, STI);
break;
}
+
+ if (S == MCDisassembler::Success && NumBenchmarkRuns != 0) {
+ // Benchmark mode: collect timing for decoding the instruction several
+ // times.
+ MCInst BMInst;
+ TimeTraceScope timeScope("getInstruction");
+ for (unsigned I = 0; I < NumBenchmarkRuns; ++I) {
+ BMInst.clear();
+ BMInst.setOpcode(0);
+ S = getInstruction(DisAsm, STI, BMInst, Size, Data.slice(Index), Index);
+ }
+ }
}
return false;
@@ -151,7 +169,7 @@ int Disassembler::disassemble(const Target &T, const std::string &Triple,
MCSubtargetInfo &STI, MCStreamer &Streamer,
MemoryBuffer &Buffer, SourceMgr &SM,
MCContext &Ctx, const MCTargetOptions &MCOptions,
- bool HexBytes) {
+ bool HexBytes, unsigned NumBenchmarkRuns) {
std::unique_ptr<const MCRegisterInfo> MRI(T.createMCRegInfo(Triple));
if (!MRI) {
errs() << "error: no register info for target " << Triple << "\n";
@@ -207,8 +225,8 @@ int Disassembler::disassemble(const Target &T, const std::string &Triple,
ErrorOccurred |= byteArrayFromString(ByteArray, Str, SM, HexBytes);
if (!ByteArray.first.empty())
- ErrorOccurred |=
- PrintInsts(*DisAsm, ByteArray, SM, Streamer, InAtomicBlock, STI);
+ ErrorOccurred |= printInsts(*DisAsm, ByteArray, SM, Streamer,
+ InAtomicBlock, STI, NumBenchmarkRuns);
}
if (InAtomicBlock) {
diff --git a/llvm/tools/llvm-mc/Disassembler.h b/llvm/tools/llvm-mc/Disassembler.h
index 5efffca..5ee47d5 100644
--- a/llvm/tools/llvm-mc/Disassembler.h
+++ b/llvm/tools/llvm-mc/Disassembler.h
@@ -32,7 +32,8 @@ public:
static int disassemble(const Target &T, const std::string &Triple,
MCSubtargetInfo &STI, MCStreamer &Streamer,
MemoryBuffer &Buffer, SourceMgr &SM, MCContext &Ctx,
- const MCTargetOptions &MCOptions, bool HexBytes);
+ const MCTargetOptions &MCOptions, bool HexBytes,
+ unsigned NumBenchmarkRuns);
};
} // namespace llvm
diff --git a/llvm/tools/llvm-mc/llvm-mc.cpp b/llvm/tools/llvm-mc/llvm-mc.cpp
index da89af7..f69f7c7 100644
--- a/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "Disassembler.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/DWARFCFIChecker/DWARFCFIFunctionFrameAnalyzer.h"
#include "llvm/DWARFCFIChecker/DWARFCFIFunctionFrameStreamer.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -37,6 +38,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/TargetParser/Host.h"
@@ -240,6 +242,23 @@ static cl::opt<ActionType> Action(
"Colored disassembly of strings of hex bytes")),
cl::cat(MCCategory));
+static cl::opt<unsigned>
+ NumBenchmarkRuns("runs", cl::desc("Number of runs for benchmarking"),
+ cl::cat(MCCategory));
+
+static cl::opt<bool> TimeTrace("time-trace", cl::desc("Record time trace"));
+
+static cl::opt<unsigned> TimeTraceGranularity(
+ "time-trace-granularity",
+ cl::desc(
+ "Minimum time granularity (in microseconds) traced by time profiler"),
+ cl::init(500), cl::Hidden);
+
+static cl::opt<std::string>
+ TimeTraceFile("time-trace-file",
+ cl::desc("Specify time trace file destination"),
+ cl::value_desc("filename"));
+
static const Target *GetTarget(const char *ProgName) {
// Figure out the target triple.
if (TripleName.empty())
@@ -371,6 +390,20 @@ int main(int argc, char **argv) {
cl::HideUnrelatedOptions({&MCCategory, &getColorCategory()});
cl::ParseCommandLineOptions(argc, argv, "llvm machine code playground\n");
+
+ if (TimeTrace)
+ timeTraceProfilerInitialize(TimeTraceGranularity, argv[0]);
+
+ auto TimeTraceScopeExit = make_scope_exit([]() {
+ if (!TimeTrace)
+ return;
+ if (auto E = timeTraceProfilerWrite(TimeTraceFile, OutputFilename)) {
+ logAllUnhandledErrors(std::move(E), errs());
+ return;
+ }
+ timeTraceProfilerCleanup();
+ });
+
MCTargetOptions MCOptions = mc::InitMCTargetOptionsFromFlags();
MCOptions.CompressDebugSections = CompressDebugSections.getValue();
MCOptions.ShowMCInst = ShowInst;
@@ -620,7 +653,8 @@ int main(int argc, char **argv) {
}
if (disassemble)
Res = Disassembler::disassemble(*TheTarget, TripleName, *STI, *Str, *Buffer,
- SrcMgr, Ctx, MCOptions, HexBytes);
+ SrcMgr, Ctx, MCOptions, HexBytes,
+ NumBenchmarkRuns);
// Keep output if no errors.
if (Res == 0) {
@@ -628,5 +662,6 @@ int main(int argc, char **argv) {
if (DwoOut)
DwoOut->keep();
}
+
return Res;
}
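As a usage illustration (assuming llvm-mc's existing --disassemble and --triple options; the exact input file is hypothetical), the options added above could be combined along these lines:

  llvm-mc --disassemble --triple=x86_64 --runs=1000 --time-trace \
      --time-trace-file=decode.json input.txt -o /dev/null

With a non-zero --runs, each successfully decoded instruction is re-decoded that many times inside the "getInstruction" TimeTraceScope added in Disassembler.cpp, and the trace is written to the file given by --time-trace-file (falling back to a name based on the -o output when unset).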
diff --git a/llvm/tools/llvm-rc/llvm-rc.cpp b/llvm/tools/llvm-rc/llvm-rc.cpp
index 7362154..f623342 100644
--- a/llvm/tools/llvm-rc/llvm-rc.cpp
+++ b/llvm/tools/llvm-rc/llvm-rc.cpp
@@ -201,7 +201,7 @@ std::string getMingwTriple() {
Triple T(sys::getDefaultTargetTriple());
if (!isUsableArch(T.getArch()))
T.setArch(getDefaultFallbackArch());
- if (T.isWindowsGNUEnvironment())
+ if (T.isOSCygMing())
return T.str();
// Write out the literal form of the vendor/env here, instead of
// constructing them with enum values (which end up with them in
diff --git a/llvm/tools/llvm-readobj/COFFDumper.cpp b/llvm/tools/llvm-readobj/COFFDumper.cpp
index dce8e60..96e0a634 100644
--- a/llvm/tools/llvm-readobj/COFFDumper.cpp
+++ b/llvm/tools/llvm-readobj/COFFDumper.cpp
@@ -412,10 +412,19 @@ const EnumEntry<COFF::DLLCharacteristics> PEDLLCharacteristics[] = {
LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE),
};
+// clang-format off
static const EnumEntry<COFF::ExtendedDLLCharacteristics>
PEExtendedDLLCharacteristics[] = {
- LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT_STRICT_MODE ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_SET_CONTEXT_IP_VALIDATION_RELAXED_MODE),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_DYNAMIC_APIS_ALLOW_IN_PROC_ONLY ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_RESERVED_1 ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_CET_RESERVED_2 ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_FORWARD_CFI_COMPAT ),
+ LLVM_READOBJ_ENUM_ENT(COFF, IMAGE_DLL_CHARACTERISTICS_EX_HOTPATCH_COMPATIBLE ),
};
+// clang-format on
static const EnumEntry<COFF::SectionCharacteristics>
ImageSectionCharacteristics[] = {
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 2699e10..66153ad 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -424,6 +424,9 @@ protected:
ArrayRef<Elf_Word> getShndxTable(const Elf_Shdr *Symtab) const;
+ void printSFrameHeader(const SFrameParser<ELFT::Endianness> &Parser);
+ void printSFrameFDEs(const SFrameParser<ELFT::Endianness> &Parser);
+
private:
mutable SmallVector<std::optional<VersionEntry>, 0> VersionMap;
};
@@ -1680,7 +1683,9 @@ const EnumEntry<unsigned> ElfHeaderNVPTXFlags[] = {
ENUM_ENT(EF_CUDA_SM75, "sm_75"), ENUM_ENT(EF_CUDA_SM80, "sm_80"),
ENUM_ENT(EF_CUDA_SM86, "sm_86"), ENUM_ENT(EF_CUDA_SM87, "sm_87"),
ENUM_ENT(EF_CUDA_SM89, "sm_89"), ENUM_ENT(EF_CUDA_SM90, "sm_90"),
- ENUM_ENT(EF_CUDA_SM100, "sm_100"), ENUM_ENT(EF_CUDA_SM120, "sm_120"),
+ ENUM_ENT(EF_CUDA_SM100, "sm_100"), ENUM_ENT(EF_CUDA_SM101, "sm_101"),
+ ENUM_ENT(EF_CUDA_SM103, "sm_103"), ENUM_ENT(EF_CUDA_SM120, "sm_120"),
+ ENUM_ENT(EF_CUDA_SM121, "sm_121"),
};
const EnumEntry<unsigned> ElfHeaderRISCVFlags[] = {
@@ -3656,8 +3661,10 @@ template <class ELFT> void GNUELFDumper<ELFT>::printFileHeaders() {
ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderXtensaFlags),
unsigned(ELF::EF_XTENSA_MACH));
else if (e.e_machine == EM_CUDA) {
- ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderNVPTXFlags),
- unsigned(ELF::EF_CUDA_SM));
+ unsigned Mask = e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V1
+ ? ELF::EF_CUDA_SM
+ : ELF::EF_CUDA_SM_MASK;
+ ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderNVPTXFlags), Mask);
if (e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V1 &&
(e.e_flags & ELF::EF_CUDA_ACCELERATORS_V1))
ElfFlags += "a";
@@ -6440,6 +6447,90 @@ template <typename ELFT> void ELFDumper<ELFT>::printMemtag() {
}
template <typename ELFT>
+void ELFDumper<ELFT>::printSFrameHeader(
+ const SFrameParser<ELFT::Endianness> &Parser) {
+ DictScope HeaderScope(W, "Header");
+
+ const sframe::Preamble<ELFT::Endianness> &Preamble = Parser.getPreamble();
+ W.printHex("Magic", Preamble.Magic.value());
+ W.printEnum("Version", Preamble.Version.value(), sframe::getVersions());
+ W.printFlags("Flags", Preamble.Flags.value(), sframe::getFlags());
+
+ const sframe::Header<ELFT::Endianness> &Header = Parser.getHeader();
+ W.printEnum("ABI", Header.ABIArch.value(), sframe::getABIs());
+
+ W.printNumber(("CFA fixed FP offset" +
+ Twine(Parser.usesFixedFPOffset() ? "" : " (unused)"))
+ .str(),
+ Header.CFAFixedFPOffset.value());
+
+ W.printNumber(("CFA fixed RA offset" +
+ Twine(Parser.usesFixedRAOffset() ? "" : " (unused)"))
+ .str(),
+ Header.CFAFixedRAOffset.value());
+
+ W.printNumber("Auxiliary header length", Header.AuxHdrLen.value());
+ W.printNumber("Num FDEs", Header.NumFDEs.value());
+ W.printNumber("Num FREs", Header.NumFREs.value());
+ W.printNumber("FRE subsection length", Header.FRELen.value());
+ W.printNumber("FDE subsection offset", Header.FDEOff.value());
+ W.printNumber("FRE subsection offset", Header.FREOff.value());
+
+ if (Expected<ArrayRef<uint8_t>> Aux = Parser.getAuxHeader())
+ W.printHexList("Auxiliary header", *Aux);
+ else
+ reportWarning(Aux.takeError(), FileName);
+}
+
+template <typename ELFT>
+void ELFDumper<ELFT>::printSFrameFDEs(
+ const SFrameParser<ELFT::Endianness> &Parser) {
+ typename SFrameParser<ELFT::Endianness>::FDERange FDEs;
+ if (Error Err = Parser.fdes().moveInto(FDEs)) {
+ reportWarning(std::move(Err), FileName);
+ return;
+ }
+
+ ListScope IndexScope(W, "Function Index");
+ for (auto It = FDEs.begin(); It != FDEs.end(); ++It) {
+ DictScope FDEScope(
+ W,
+ formatv("FuncDescEntry [{0}]", std::distance(FDEs.begin(), It)).str());
+
+ W.printHex("PC", Parser.getAbsoluteStartAddress(It));
+ W.printHex("Size", It->Size);
+ W.printHex("Start FRE Offset", It->StartFREOff);
+ W.printNumber("Num FREs", It->NumFREs);
+
+ {
+ DictScope InfoScope(W, "Info");
+ W.printEnum("FRE Type", It->getFREType(), sframe::getFRETypes());
+ W.printEnum("FDE Type", It->getFDEType(), sframe::getFDETypes());
+ switch (Parser.getHeader().ABIArch) {
+ case sframe::ABI::AArch64EndianBig:
+ case sframe::ABI::AArch64EndianLittle:
+ W.printEnum("PAuth Key", sframe::AArch64PAuthKey(It->getPAuthKey()),
+ sframe::getAArch64PAuthKeys());
+ break;
+ case sframe::ABI::AMD64EndianLittle:
+ // unused
+ break;
+ }
+
+ W.printHex("Raw", It->Info);
+ }
+
+ W.printHex(
+ ("Repetitive block size" +
+ Twine(It->getFDEType() == sframe::FDEType::PCMask ? "" : " (unused)"))
+ .str(),
+ It->RepSize);
+
+ W.printHex("Padding2", It->Padding2);
+ }
+}
+
+template <typename ELFT>
void ELFDumper<ELFT>::printSectionsAsSFrame(ArrayRef<std::string> Sections) {
constexpr endianness E = ELFT::Endianness;
for (object::SectionRef Section :
@@ -6456,8 +6547,8 @@ void ELFDumper<ELFT>::printSectionsAsSFrame(ArrayRef<std::string> Sections) {
continue;
}
- Expected<object::SFrameParser<E>> Parser =
- object::SFrameParser<E>::create(arrayRefFromStringRef(SectionContent));
+ Expected<object::SFrameParser<E>> Parser = object::SFrameParser<E>::create(
+ arrayRefFromStringRef(SectionContent), Section.getAddress());
if (!Parser) {
reportWarning(createError("invalid sframe section: " +
toString(Parser.takeError())),
@@ -6465,32 +6556,8 @@ void ELFDumper<ELFT>::printSectionsAsSFrame(ArrayRef<std::string> Sections) {
continue;
}
- DictScope HeaderScope(W, "Header");
-
- const sframe::Preamble<E> &Preamble = Parser->getPreamble();
- W.printHex("Magic", Preamble.Magic.value());
- W.printEnum("Version", Preamble.Version.value(), sframe::getVersions());
- W.printFlags("Flags", Preamble.Flags.value(), sframe::getFlags());
-
- const sframe::Header<E> &Header = Parser->getHeader();
- W.printEnum("ABI", Header.ABIArch.value(), sframe::getABIs());
-
- W.printNumber(("CFA fixed FP offset" +
- Twine(Parser->usesFixedFPOffset() ? "" : " (unused)"))
- .str(),
- Header.CFAFixedFPOffset.value());
-
- W.printNumber(("CFA fixed RA offset" +
- Twine(Parser->usesFixedRAOffset() ? "" : " (unused)"))
- .str(),
- Header.CFAFixedRAOffset.value());
-
- W.printNumber("Auxiliary header length", Header.AuxHdrLen.value());
- W.printNumber("Num FDEs", Header.NumFDEs.value());
- W.printNumber("Num FREs", Header.NumFREs.value());
- W.printNumber("FRE subsection length", Header.FRELen.value());
- W.printNumber("FDE subsection offset", Header.FDEOff.value());
- W.printNumber("FRE subsection offset", Header.FREOff.value());
+ printSFrameHeader(*Parser);
+ printSFrameFDEs(*Parser);
}
}
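For orientation, the per-FDE output produced by printSFrameFDEs above has roughly the following shape (values are placeholders; a PAuth Key line appears only for the AArch64 ABIs, and the "(unused)" suffix on the repetitive block size is dropped for PCMask-type FDEs):

  Function Index [
    FuncDescEntry [0] {
      PC: 0x...
      Size: 0x...
      Start FRE Offset: 0x...
      Num FREs: ...
      Info {
        FRE Type: ...
        FDE Type: ...
        Raw: 0x...
      }
      Repetitive block size (unused): 0x...
      Padding2: 0x...
    }
  ]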
diff --git a/llvm/tools/obj2yaml/elf2yaml.cpp b/llvm/tools/obj2yaml/elf2yaml.cpp
index 53455b8..ab15553 100644
--- a/llvm/tools/obj2yaml/elf2yaml.cpp
+++ b/llvm/tools/obj2yaml/elf2yaml.cpp
@@ -281,7 +281,8 @@ template <class ELFT> Expected<ELFYAML::Object *> ELFDumper<ELFT>::dump() {
Y->Header.Type = Obj.getHeader().e_type;
if (Obj.getHeader().e_machine != 0)
Y->Header.Machine = ELFYAML::ELF_EM(Obj.getHeader().e_machine);
- Y->Header.Flags = Obj.getHeader().e_flags;
+ if (Obj.getHeader().e_flags != 0)
+ Y->Header.Flags = ELFYAML::ELF_EF(Obj.getHeader().e_flags);
Y->Header.Entry = Obj.getHeader().e_entry;
// Dump sections
diff --git a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
index deabf11..d9da627 100644
--- a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
@@ -2,7 +2,7 @@
# libraries, but expects them to exist in the process loading the plugin. This
# doesn't work with DLLs on Windows (where a shared library can't have undefined
# references), so just skip this testcase on Windows.
-if ((NOT WIN32 OR LLVM_BUILD_LLVM_DYLIB) AND NOT CYGWIN)
+if ((NOT WIN32 AND NOT CYGWIN) OR LLVM_BUILD_LLVM_DYLIB)
unset(LLVM_LINK_COMPONENTS)
add_llvm_library(InlineAdvisorPlugin MODULE BUILDTREE_ONLY
InlineAdvisorPlugin.cpp
diff --git a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
index 0b37ceb..941e18efc 100644
--- a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
@@ -2,7 +2,7 @@
# libraries, but expects them to exist in the process loading the plugin. This
# doesn't work with DLLs on Windows (where a shared library can't have undefined
# references), so just skip this testcase on Windows.
-if ((NOT WIN32 OR LLVM_BUILD_LLVM_DYLIB) AND NOT CYGWIN)
+if ((NOT WIN32 AND NOT CYGWIN) OR LLVM_BUILD_LLVM_DYLIB)
unset(LLVM_LINK_COMPONENTS)
add_llvm_library(InlineOrderPlugin MODULE BUILDTREE_ONLY
InlineOrderPlugin.cpp
diff --git a/llvm/unittests/CodeGen/CMakeLists.txt b/llvm/unittests/CodeGen/CMakeLists.txt
index d19b122..22dbdaa 100644
--- a/llvm/unittests/CodeGen/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/CMakeLists.txt
@@ -42,6 +42,7 @@ add_llvm_unittest(CodeGenTests
ScalableVectorMVTsTest.cpp
SchedBoundary.cpp
SelectionDAGAddressAnalysisTest.cpp
+ SelectionDAGNodeConstructionTest.cpp
SelectionDAGPatternMatchTest.cpp
TypeTraitsTest.cpp
TargetOptionsTest.cpp
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
index 7928c91..c5992eb 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
@@ -78,7 +78,7 @@ TEST_F(AArch64GISelMITest, BasicLegalizerTest) {
CHECK: %vptr:_(p0) = COPY $x4
CHECK-NEXT: [[LOAD_0:%[0-9]+]]:_(s16) = G_LOAD %vptr:_(p0) :: (load (s8))
CHECK-NEXT: [[OFFSET_1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
+ CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load (s8) from unknown-address + 1)
CHECK-NEXT: %v:_(<2 x s8>) = G_BUILD_VECTOR_TRUNC [[LOAD_0]]:_(s16), [[LOAD_1]]:_(s16)
CHECK-NEXT: $h4 = COPY %v:_(<2 x s8>)
@@ -210,7 +210,7 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningManyCopiesTest) {
CHECK: %vptr:_(p0) = COPY $x4
CHECK-NEXT: [[LOAD_0:%[0-9]+]]:_(s16) = G_LOAD %vptr:_(p0) :: (load (s8))
CHECK-NEXT: [[OFFSET_1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
+ CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load (s8) from unknown-address + 1)
CHECK-NEXT: [[V0_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD_0]]:_(s16)
CHECK-NEXT: [[FF_MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
diff --git a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
index 0058daf..7ad7a51 100644
--- a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
@@ -7,103 +7,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
+#include "SelectionDAGTestBase.h"
#include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/AsmParser/Parser.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/TargetLowering.h"
-#include "llvm/IR/Module.h"
-#include "llvm/MC/TargetRegistry.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetMachine.h"
-#include "gtest/gtest.h"
namespace llvm {
-class SelectionDAGAddressAnalysisTest : public testing::Test {
-protected:
- static void SetUpTestCase() {
- InitializeAllTargets();
- InitializeAllTargetMCs();
- }
-
- void SetUp() override {
- StringRef Assembly = "@g = global i32 0\n"
- "@g_alias = alias i32, i32* @g\n"
- "define i32 @f() {\n"
- " %1 = load i32, i32* @g\n"
- " ret i32 %1\n"
- "}";
-
- Triple TargetTriple("aarch64--");
- std::string Error;
- const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
- // FIXME: These tests do not depend on AArch64 specifically, but we have to
- // initialize a target. A skeleton Target for unittests would allow us to
- // always run these tests.
- if (!T)
- GTEST_SKIP();
-
- TargetOptions Options;
- TM = std::unique_ptr<TargetMachine>(
- T->createTargetMachine(TargetTriple, "", "+sve", Options, std::nullopt,
- std::nullopt, CodeGenOptLevel::Aggressive));
- if (!TM)
- GTEST_SKIP();
-
- SMDiagnostic SMError;
- M = parseAssemblyString(Assembly, SMError, Context);
- if (!M)
- report_fatal_error(SMError.getMessage());
- M->setDataLayout(TM->createDataLayout());
-
- F = M->getFunction("f");
- if (!F)
- report_fatal_error("F?");
- G = M->getGlobalVariable("g");
- if (!G)
- report_fatal_error("G?");
- AliasedG = M->getNamedAlias("g_alias");
- if (!AliasedG)
- report_fatal_error("AliasedG?");
-
- MachineModuleInfo MMI(TM.get());
-
- MF = std::make_unique<MachineFunction>(*F, *TM, *TM->getSubtargetImpl(*F),
- MMI.getContext(), 0);
-
- DAG = std::make_unique<SelectionDAG>(*TM, CodeGenOptLevel::None);
- if (!DAG)
- report_fatal_error("DAG?");
- OptimizationRemarkEmitter ORE(F);
- FunctionAnalysisManager FAM;
- FAM.registerPass([&] { return TM->getTargetIRAnalysis(); });
-
- TargetTransformInfo TTI = TM->getTargetIRAnalysis().run(*F, FAM);
- DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, MMI,
- nullptr, TTI.hasBranchDivergence(F));
- }
-
- TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
- return DAG->getTargetLoweringInfo().getTypeAction(Context, VT);
- }
-
- EVT getTypeToTransformTo(EVT VT) {
- return DAG->getTargetLoweringInfo().getTypeToTransformTo(Context, VT);
- }
-
- LLVMContext Context;
- std::unique_ptr<TargetMachine> TM;
- std::unique_ptr<Module> M;
- Function *F;
- GlobalVariable *G;
- GlobalAlias *AliasedG;
- std::unique_ptr<MachineFunction> MF;
- std::unique_ptr<SelectionDAG> DAG;
-};
+class SelectionDAGAddressAnalysisTest : public SelectionDAGTestBase {};
TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObject) {
SDLoc Loc;
diff --git a/llvm/unittests/CodeGen/SelectionDAGNodeConstructionTest.cpp b/llvm/unittests/CodeGen/SelectionDAGNodeConstructionTest.cpp
new file mode 100644
index 0000000..b2c1420
--- /dev/null
+++ b/llvm/unittests/CodeGen/SelectionDAGNodeConstructionTest.cpp
@@ -0,0 +1,317 @@
+//===---- llvm/unittest/CodeGen/SelectionDAGNodeConstructionTest.cpp -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SelectionDAGTestBase.h"
+
+using namespace llvm;
+
+class SelectionDAGNodeConstructionTest : public SelectionDAGTestBase {};
+
+TEST_F(SelectionDAGNodeConstructionTest, ADD) {
+ SDLoc DL;
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Op, Undef), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Undef, Op), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::ADD, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, AND) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Op, Undef), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::AND, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, MUL) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Op, Undef), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::MUL, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, OR) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue AllOnes = DAG->getAllOnesConstant(DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Op, Undef), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Undef, Op), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::OR, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SADDSAT) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue AllOnes = DAG->getAllOnesConstant(DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Op, Undef), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Undef, Op), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::SADDSAT, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SDIV) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Op, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::SDIV, DL, MVT::i32, Undef, Undef), Poison);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SMAX) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue MaxInt = DAG->getConstant(APInt::getSignedMaxValue(32), DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Op, Undef), MaxInt);
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Undef, Op), MaxInt);
+ EXPECT_EQ(DAG->getNode(ISD::SMAX, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SMIN) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue MinInt = DAG->getConstant(APInt::getSignedMinValue(32), DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Op, Undef), MinInt);
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Undef, Op), MinInt);
+ EXPECT_EQ(DAG->getNode(ISD::SMIN, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SREM) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Op, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::SREM, DL, MVT::i32, Undef, Undef), Poison);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SSUBSAT) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Op, Undef), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::SSUBSAT, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, SUB) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Op, Undef), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Undef, Op), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::SUB, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, UADDSAT) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue AllOnes = DAG->getAllOnesConstant(DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Op, Undef), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Undef, Op), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::UADDSAT, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, UDIV) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Op, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::UDIV, DL, MVT::i32, Undef, Undef), Poison);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, UMAX) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue AllOnes = DAG->getAllOnesConstant(DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Op, Undef), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Undef, Op), AllOnes);
+ EXPECT_EQ(DAG->getNode(ISD::UMAX, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, UMIN) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Op, Undef), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::UMIN, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, UREM) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Op, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::UREM, DL, MVT::i32, Undef, Undef), Poison);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, USUBSAT) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Op, Undef), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Undef, Op), Zero);
+ EXPECT_EQ(DAG->getNode(ISD::USUBSAT, DL, MVT::i32, Undef, Undef), Undef);
+}
+
+TEST_F(SelectionDAGNodeConstructionTest, XOR) {
+ SDLoc DL;
+ SDValue Op = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, MVT::i32);
+ SDValue Poison = DAG->getPOISON(MVT::i32);
+ SDValue Undef = DAG->getUNDEF(MVT::i32);
+ SDValue Zero = DAG->getConstant(0, DL, MVT::i32);
+
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Op, Poison), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Poison, Op), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Poison, Undef), Poison);
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Undef, Poison), Poison);
+
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Op, Undef), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Undef, Op), Undef);
+ EXPECT_EQ(DAG->getNode(ISD::XOR, DL, MVT::i32, Undef, Undef), Zero);
+}
diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
index 30a1406..4e0bf38 100644
--- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
@@ -6,102 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/AsmParser/Parser.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "SelectionDAGTestBase.h"
#include "llvm/CodeGen/SDPatternMatch.h"
-#include "llvm/CodeGen/TargetLowering.h"
-#include "llvm/IR/Module.h"
-#include "llvm/MC/TargetRegistry.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetMachine.h"
-#include "gtest/gtest.h"
using namespace llvm;
-class SelectionDAGPatternMatchTest : public testing::Test {
-protected:
- static void SetUpTestCase() {
- InitializeAllTargets();
- InitializeAllTargetMCs();
- }
-
- void SetUp() override {
- StringRef Assembly = "@g = global i32 0\n"
- "@g_alias = alias i32, i32* @g\n"
- "define i32 @f() {\n"
- " %1 = load i32, i32* @g\n"
- " ret i32 %1\n"
- "}";
-
- Triple TargetTriple("riscv64--");
- std::string Error;
- const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
- // FIXME: These tests do not depend on RISCV specifically, but we have to
- // initialize a target. A skeleton Target for unittests would allow us to
- // always run these tests.
- if (!T)
- GTEST_SKIP();
-
- TargetOptions Options;
- TM = std::unique_ptr<TargetMachine>(T->createTargetMachine(
- TargetTriple, "", "+m,+f,+d,+v", Options, std::nullopt, std::nullopt,
- CodeGenOptLevel::Aggressive));
- if (!TM)
- GTEST_SKIP();
-
- SMDiagnostic SMError;
- M = parseAssemblyString(Assembly, SMError, Context);
- if (!M)
- report_fatal_error(SMError.getMessage());
- M->setDataLayout(TM->createDataLayout());
-
- F = M->getFunction("f");
- if (!F)
- report_fatal_error("F?");
- G = M->getGlobalVariable("g");
- if (!G)
- report_fatal_error("G?");
- AliasedG = M->getNamedAlias("g_alias");
- if (!AliasedG)
- report_fatal_error("AliasedG?");
-
- MachineModuleInfo MMI(TM.get());
-
- MF = std::make_unique<MachineFunction>(*F, *TM, *TM->getSubtargetImpl(*F),
- MMI.getContext(), 0);
-
- DAG = std::make_unique<SelectionDAG>(*TM, CodeGenOptLevel::None);
- if (!DAG)
- report_fatal_error("DAG?");
- OptimizationRemarkEmitter ORE(F);
- FunctionAnalysisManager FAM;
- FAM.registerPass([&] { return TM->getTargetIRAnalysis(); });
-
- TargetTransformInfo TTI = TM->getTargetIRAnalysis().run(*F, FAM);
- DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, MMI,
- nullptr, TTI.hasBranchDivergence(F));
- }
-
- TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
- return DAG->getTargetLoweringInfo().getTypeAction(Context, VT);
- }
-
- EVT getTypeToTransformTo(EVT VT) {
- return DAG->getTargetLoweringInfo().getTypeToTransformTo(Context, VT);
- }
-
- LLVMContext Context;
- std::unique_ptr<TargetMachine> TM;
- std::unique_ptr<Module> M;
- Function *F;
- GlobalVariable *G;
- GlobalAlias *AliasedG;
- std::unique_ptr<MachineFunction> MF;
- std::unique_ptr<SelectionDAG> DAG;
-};
+class SelectionDAGPatternMatchTest : public SelectionDAGTestBase {};
TEST_F(SelectionDAGPatternMatchTest, matchValueType) {
SDLoc DL;
diff --git a/llvm/unittests/CodeGen/SelectionDAGTestBase.h b/llvm/unittests/CodeGen/SelectionDAGTestBase.h
new file mode 100644
index 0000000..edc730d
--- /dev/null
+++ b/llvm/unittests/CodeGen/SelectionDAGTestBase.h
@@ -0,0 +1,99 @@
+//===---- llvm/unittest/CodeGen/SelectionDAGTestBase.h --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+class SelectionDAGTestBase : public testing::Test {
+protected:
+ static void SetUpTestCase() {
+ InitializeAllTargets();
+ InitializeAllTargetMCs();
+ }
+
+ void SetUp() override {
+ StringRef Assembly = "@g = global i32 0\n"
+ "@g_alias = alias i32, i32* @g\n"
+ "define i32 @f() {\n"
+ " %1 = load i32, i32* @g\n"
+ " ret i32 %1\n"
+ "}";
+
+ Triple TargetTriple("aarch64--");
+ std::string Error;
+ const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
+ // FIXME: These tests do not depend on AArch64 specifically, but we have to
+ // initialize a target. A skeleton Target for unittests would allow us to
+ // always run these tests.
+ if (!T)
+ GTEST_SKIP();
+
+ TargetOptions Options;
+ TM = std::unique_ptr<TargetMachine>(
+ T->createTargetMachine(TargetTriple, "", "+sve", Options, std::nullopt,
+ std::nullopt, CodeGenOptLevel::Aggressive));
+ if (!TM)
+ GTEST_SKIP();
+
+ SMDiagnostic SMError;
+ M = parseAssemblyString(Assembly, SMError, Context);
+ ASSERT_TRUE(M && "Could not parse module!");
+ M->setDataLayout(TM->createDataLayout());
+
+ F = M->getFunction("f");
+ ASSERT_TRUE(F && "Could not get function f!");
+ G = M->getGlobalVariable("g");
+ ASSERT_TRUE(G && "Could not get global g!");
+ AliasedG = M->getNamedAlias("g_alias");
+ ASSERT_TRUE(AliasedG && "Could not get alias g_alias!");
+
+ MachineModuleInfo MMI(TM.get());
+
+ MF = std::make_unique<MachineFunction>(*F, *TM, *TM->getSubtargetImpl(*F),
+ MMI.getContext(), 0);
+
+ DAG = std::make_unique<SelectionDAG>(*TM, CodeGenOptLevel::None);
+ if (!DAG)
+ reportFatalUsageError("Failed to create SelectionDAG?");
+ OptimizationRemarkEmitter ORE(F);
+ FunctionAnalysisManager FAM;
+ FAM.registerPass([&] { return TM->getTargetIRAnalysis(); });
+
+ TargetTransformInfo TTI = TM->getTargetIRAnalysis().run(*F, FAM);
+ DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, MMI,
+ nullptr, TTI.hasBranchDivergence(F));
+ }
+
+ TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
+ return DAG->getTargetLoweringInfo().getTypeAction(Context, VT);
+ }
+
+ EVT getTypeToTransformTo(EVT VT) {
+ return DAG->getTargetLoweringInfo().getTypeToTransformTo(Context, VT);
+ }
+
+ LLVMContext Context;
+ std::unique_ptr<TargetMachine> TM;
+ std::unique_ptr<Module> M;
+ Function *F;
+ GlobalVariable *G;
+ GlobalAlias *AliasedG;
+ std::unique_ptr<MachineFunction> MF;
+ std::unique_ptr<SelectionDAG> DAG;
+};
diff --git a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
index 4a05a29..ec94083 100644
--- a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
@@ -1559,16 +1559,11 @@ TEST_F(CoreAPIsStandardTest, TestLookupWithThreadedMaterialization) {
#if LLVM_ENABLE_THREADS
std::mutex WorkThreadsMutex;
- std::vector<std::thread> WorkThreads;
+ SmallVector<std::thread, 0> WorkThreads;
DispatchOverride = [&](std::unique_ptr<Task> T) {
- std::promise<void> WaitP;
std::lock_guard<std::mutex> Lock(WorkThreadsMutex);
WorkThreads.push_back(
- std::thread([T = std::move(T), WaitF = WaitP.get_future()]() mutable {
- WaitF.get();
- T->run();
- }));
- WaitP.set_value();
+ std::thread([T = std::move(T)]() mutable { T->run(); }));
};
cantFail(JD.define(absoluteSymbols({{Foo, FooSym}})));
@@ -1580,8 +1575,15 @@ TEST_F(CoreAPIsStandardTest, TestLookupWithThreadedMaterialization) {
EXPECT_EQ(FooLookupResult.getFlags(), FooSym.getFlags())
<< "lookup returned incorrect flags";
- for (auto &WT : WorkThreads)
+ std::unique_lock<std::mutex> Lock(WorkThreadsMutex);
+ // This works because every child thread that is allowed to use WorkThreads
+ // is either already in WorkThreads or has a parent that is.
+ while (!WorkThreads.empty()) {
+ auto WT = WorkThreads.pop_back_val();
+ Lock.unlock();
WT.join();
+ Lock.lock();
+ }
#endif
}
diff --git a/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp b/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
index 6ab659d..fea9eab 100644
--- a/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
@@ -81,7 +81,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
{
// Provide working memory
char *WA1 = Mapper->prepare(Mem1->Start, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA1), HW.c_str());
+ std::strcpy(WA1, HW.c_str());
}
// A structure to be passed to initialize
@@ -106,7 +106,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
{
char *WA2 = Mapper->prepare(Mem1->Start + PageSize, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA2), HW.c_str());
+ std::strcpy(WA2, HW.c_str());
}
MemoryMapper::AllocInfo Alloc2;
@@ -159,7 +159,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
EXPECT_THAT_ERROR(Mem2.takeError(), Succeeded());
char *WA = Mapper->prepare(Mem2->Start, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA), HW.c_str());
+ std::strcpy(WA, HW.c_str());
MemoryMapper::AllocInfo Alloc3;
{
diff --git a/llvm/unittests/Frontend/CMakeLists.txt b/llvm/unittests/Frontend/CMakeLists.txt
index 281d509..6e4ba5d 100644
--- a/llvm/unittests/Frontend/CMakeLists.txt
+++ b/llvm/unittests/Frontend/CMakeLists.txt
@@ -11,6 +11,7 @@ set(LLVM_LINK_COMPONENTS
)
add_llvm_unittest(LLVMFrontendTests
+ HLSLBindingTest.cpp
HLSLRootSignatureDumpTest.cpp
HLSLRootSignatureRangesTest.cpp
OpenACCTest.cpp
diff --git a/llvm/unittests/Frontend/HLSLBindingTest.cpp b/llvm/unittests/Frontend/HLSLBindingTest.cpp
new file mode 100644
index 0000000..ca2f7b5
--- /dev/null
+++ b/llvm/unittests/Frontend/HLSLBindingTest.cpp
@@ -0,0 +1,275 @@
+//===------ HLSLBindingTest.cpp - Resource binding tests ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/DXILABI.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+using namespace llvm::dxil;
+
+MATCHER_P(HasSpecificValue, Value, "") {
+ return arg.has_value() && *arg == Value;
+}
+
+static void
+checkExpectedSpaceAndFreeRanges(hlsl::BindingInfo::RegisterSpace &RegSpace,
+ uint32_t ExpSpace,
+ ArrayRef<uint32_t> ExpValues) {
+ EXPECT_EQ(RegSpace.Space, ExpSpace);
+ EXPECT_EQ(RegSpace.FreeRanges.size() * 2, ExpValues.size());
+ unsigned I = 0;
+ for (auto &R : RegSpace.FreeRanges) {
+ EXPECT_EQ(R.LowerBound, ExpValues[I]);
+ EXPECT_EQ(R.UpperBound, ExpValues[I + 1]);
+ I += 2;
+ }
+}
+
+TEST(HLSLBindingTest, TestTrivialCase) {
+ hlsl::BindingInfoBuilder Builder;
+
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ // check that UAV has exactly one gap
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0, {0u, 4u, 6u, ~0u});
+
+ // check that other kinds of register spaces are all available
+ for (auto RC :
+ {ResourceClass::SRV, ResourceClass::CBuffer, ResourceClass::Sampler}) {
+ hlsl::BindingInfo::BindingSpaces &Spaces = Info.getBindingSpaces(RC);
+ EXPECT_EQ(Spaces.RC, RC);
+ EXPECT_EQ(Spaces.Spaces.size(), 0u);
+ }
+}
+
+TEST(HLSLBindingTest, TestManyBindings) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // cbuffer CB : register(b3) { int a; }
+ // RWBuffer<float4> A[5] : register(u10, space20);
+ // StructuredBuffer<int> B : register(t5);
+ // RWBuffer<float> C : register(u5);
+ // StructuredBuffer<int> D[5] : register(t0);
+ // RWBuffer<float> E[2] : register(u2);
+ // SamplerState S1 : register(s5, space2);
+ // SamplerState S2 : register(s4, space2);
+ Builder.trackBinding(ResourceClass::CBuffer, /*Space=*/0, /*LowerBound=*/3,
+ /*UpperBound=*/3, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/20, /*LowerBound=*/10,
+ /*UpperBound=*/14, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/0,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/2,
+ /*UpperBound=*/3, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::Sampler, /*Space=*/2, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::Sampler, /*Space=*/2, /*LowerBound=*/4,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
+ // verify that consecutive bindings are merged
+ // (SRVSpaces has only one free space range {6, ~0u}).
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {6u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 2u);
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
+ {0u, 1u, 4u, 4u, 6u, ~0u});
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 20, {0u, 9u, 15u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &CBufferSpaces =
+ Info.getBindingSpaces(ResourceClass::CBuffer);
+ EXPECT_EQ(CBufferSpaces.RC, ResourceClass::CBuffer);
+ EXPECT_EQ(CBufferSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(CBufferSpaces.Spaces[0], 0,
+ {0u, 2u, 4u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &SamplerSpaces =
+ Info.getBindingSpaces(ResourceClass::Sampler);
+ EXPECT_EQ(SamplerSpaces.RC, ResourceClass::Sampler);
+ EXPECT_EQ(SamplerSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(SamplerSpaces.Spaces[0], 2,
+ {0u, 3u, 6u, ~0u});
+}
+
+TEST(HLSLBindingTest, TestUnboundedAndOverlap) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // StructuredBuffer<float> A[] : register(t5);
+ // StructuredBuffer<float> B[3] : register(t0);
+ // StructuredBuffer<float> C[] : register(t0, space2);
+ // StructuredBuffer<float> D : register(t4, space2); /* overlapping */
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/0,
+ /*UpperBound=*/2, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/2, /*LowerBound=*/0,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/2, /*LowerBound=*/4,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_TRUE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 2u);
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {3, 4});
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[1], 2, {});
+}
+
+TEST(HLSLBindingTest, TestExactOverlap) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // Since the bindings overlap exactly we need sigil values to differentiate
+ // them.
+ // Note: We initialize these to 0 to suppress a -Wuninitialized-const-pointer,
+ // but we really are just using the stack addresses here.
+ char ID1 = 0;
+ char ID2 = 0;
+
+ // StructuredBuffer<float> A : register(t5);
+ // StructuredBuffer<float> B : register(t5);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/&ID1);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/&ID2);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_TRUE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {0u, 4u, 6u, ~0u});
+}
+
+TEST(HLSLBindingTest, TestEndOfRange) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // RWBuffer<float> A : register(u4294967295); /* UINT32_MAX */
+ // RWBuffer<float> B[10] : register(u4294967286, space1);
+ // /* range (UINT32_MAX - 9, UINT32_MAX )*/
+ // RWBuffer<float> C[10] : register(u2147483647, space2);
+ // /* range (INT32_MAX, INT32_MAX + 9) */
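+ // Note: ~0u in the bindings below is UINT32_MAX (4294967295).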
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/~0u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/~0u - 9u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/2,
+ /*LowerBound=*/2147483647u,
+ /*UpperBound=*/2147483647u + 9u, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 3u);
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[0], 0, {0, std::numeric_limits<uint32_t>::max() - 1});
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[1], 1, {0, std::numeric_limits<uint32_t>::max() - 10});
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[2], 2,
+ {0, static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) - 1u,
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) + 10u,
+ std::numeric_limits<uint32_t>::max()});
+}
+
+TEST(HLSLBindingTest, TestFindAvailable) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // RWBuffer<float> A : register(u5);
+ // RWBuffer<float> B : register(u2, space1);
+ // RWBuffer<float> C : register(u6, space1);
+ // RWBuffer<float> D[] : register(u1, space2);
+ // RWBuffer<float> E : register(u4294967294, space3);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5u,
+ /*UpperBound=*/5u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/2u,
+ /*UpperBound=*/2u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/6u,
+ /*UpperBound=*/6u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/2, /*LowerBound=*/1u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/3, /*LowerBound=*/~0u - 1,
+ /*UpperBound=*/~0u - 1, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ // In space 0, we find room for a small binding at the beginning and
+ // a large binding after `A`'s binding.
+ std::optional<uint32_t> V =
+ Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/0, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/0, /*Size=*/100);
+ EXPECT_THAT(V, HasSpecificValue(6u));
+
+ // In space 1, we try to fit larger bindings into the gaps first. Note that
+ // we do this largest to smallest and observe that the earlier gaps still
+ // exist.
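+ // (Space 1 has bindings at registers 2 and 6, so the free gaps start out as
+ // 0-1, 3-5, and 7 upward.)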
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/4);
+ EXPECT_THAT(V, HasSpecificValue(7u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/3);
+ EXPECT_THAT(V, HasSpecificValue(3u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/2);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+ // At this point, we've used all of the contiguous space up to 11u
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(11u));
+
+ // Space 2 is mostly full; we can only fit into the room at the beginning.
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/2, /*Size=*/2);
+ EXPECT_FALSE(V.has_value());
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/2, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+
+ // Finding space for an unbounded array is a bit trickier. It always needs to
+ // go at the end of the available space.
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/3,
+ /*Size=*/~0u);
+ // Note that we end up with a size 1 array here, starting at ~0u.
+ EXPECT_THAT(V, HasSpecificValue(~0u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/4,
+ /*Size=*/~0u);
+ // In an empty space we find the slot at the beginning.
+ EXPECT_THAT(V, HasSpecificValue(0u));
+}
diff --git a/llvm/unittests/Support/VirtualFileSystemTest.cpp b/llvm/unittests/Support/VirtualFileSystemTest.cpp
index eb590e4..fc3ccea 100644
--- a/llvm/unittests/Support/VirtualFileSystemTest.cpp
+++ b/llvm/unittests/Support/VirtualFileSystemTest.cpp
@@ -225,7 +225,7 @@ std::string getPosixPath(const Twine &S) {
} // end anonymous namespace
TEST(VirtualFileSystemTest, StatusQueries) {
- IntrusiveRefCntPtr<DummyFileSystem> D(new DummyFileSystem());
+ auto D = makeIntrusiveRefCnt<DummyFileSystem>();
ErrorOr<vfs::Status> Status((std::error_code()));
D->addRegularFile("/foo");
@@ -265,11 +265,11 @@ TEST(VirtualFileSystemTest, StatusQueries) {
}
TEST(VirtualFileSystemTest, BaseOnlyOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> D(new DummyFileSystem());
+ auto D = makeIntrusiveRefCnt<DummyFileSystem>();
ErrorOr<vfs::Status> Status((std::error_code()));
EXPECT_FALSE(Status = D->status("/foo"));
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(new vfs::OverlayFileSystem(D));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(D);
EXPECT_FALSE(Status = O->status("/foo"));
D->addRegularFile("/foo");
@@ -283,13 +283,12 @@ TEST(VirtualFileSystemTest, BaseOnlyOverlay) {
}
TEST(VirtualFileSystemTest, GetRealPathInOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("/foo");
Lower->addSymlink("/lower_link");
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
// Regular file.
@@ -312,11 +311,10 @@ TEST(VirtualFileSystemTest, GetRealPathInOverlay) {
}
TEST(VirtualFileSystemTest, OverlayFiles) {
- IntrusiveRefCntPtr<DummyFileSystem> Base(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Top(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Base));
+ auto Base = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Top = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Base);
O->pushOverlay(Middle);
O->pushOverlay(Top);
@@ -351,10 +349,9 @@ TEST(VirtualFileSystemTest, OverlayFiles) {
}
TEST(VirtualFileSystemTest, OverlayDirsNonMerged) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
Lower->addDirectory("/lower-only");
@@ -376,10 +373,9 @@ TEST(VirtualFileSystemTest, OverlayDirsNonMerged) {
TEST(VirtualFileSystemTest, MergedDirPermissions) {
// merged directories get the permissions of the upper dir
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
ErrorOr<vfs::Status> Status((std::error_code()));
@@ -401,12 +397,11 @@ TEST(VirtualFileSystemTest, MergedDirPermissions) {
}
TEST(VirtualFileSystemTest, OverlayIterator) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("/foo");
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
ErrorOr<vfs::Status> Status((std::error_code()));
@@ -784,10 +779,9 @@ static void checkContents(DirIter I, ArrayRef<StringRef> ExpectedOut) {
}
TEST(VirtualFileSystemTest, OverlayIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
std::error_code EC;
@@ -808,11 +802,10 @@ TEST(VirtualFileSystemTest, OverlayIteration) {
}
TEST(VirtualFileSystemTest, OverlayRecursiveIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -850,11 +843,10 @@ TEST(VirtualFileSystemTest, OverlayRecursiveIteration) {
}
TEST(VirtualFileSystemTest, ThreeLevelIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -870,11 +862,10 @@ TEST(VirtualFileSystemTest, ThreeLevelIteration) {
}
TEST(VirtualFileSystemTest, HiddenInIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -913,11 +904,10 @@ TEST(VirtualFileSystemTest, HiddenInIteration) {
}
TEST(VirtualFileSystemTest, Visit) {
- IntrusiveRefCntPtr<DummyFileSystem> Base(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Top(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Base));
+ auto Base = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Top = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Base);
O->pushOverlay(Middle);
O->pushOverlay(Top);
@@ -984,10 +974,9 @@ TEST(OverlayFileSystemTest, PrintOutput) {
}
TEST(OverlayFileSystemTest, Exists) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new NoStatusDummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new NoStatusDummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
Lower->addDirectory("/both");
@@ -1008,8 +997,7 @@ TEST(OverlayFileSystemTest, Exists) {
}
TEST(ProxyFileSystemTest, Basic) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> Base(
- new vfs::InMemoryFileSystem());
+ auto Base = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
vfs::ProxyFileSystem PFS(Base);
Base->addFile("/a", 0, MemoryBuffer::getMemBuffer("test"));
@@ -1606,7 +1594,7 @@ TEST_F(VFSFromYAMLTest, BasicVFSFromYAML) {
}
TEST_F(VFSFromYAMLTest, MappedFiles) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
@@ -1642,8 +1630,7 @@ TEST_F(VFSFromYAMLTest, MappedFiles) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
// file
@@ -1720,7 +1707,7 @@ TEST_F(VFSFromYAMLTest, MappedFiles) {
}
TEST_F(VFSFromYAMLTest, MappedRoot) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -1735,8 +1722,7 @@ TEST_F(VFSFromYAMLTest, MappedRoot) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
// file
@@ -1762,7 +1748,7 @@ TEST_F(VFSFromYAMLTest, MappedRoot) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1783,8 +1769,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> S = O->status("//root/foo");
@@ -1806,7 +1791,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoExternalNames) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1847,7 +1832,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoExternalNames) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoFallthrough) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1887,13 +1872,12 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoFallthrough) {
}
TEST_F(VFSFromYAMLTest, ReturnsRequestedPathVFSMiss) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/a", 0,
MemoryBuffer::getMemBuffer("contents of a"));
ASSERT_FALSE(BaseFS->setCurrentWorkingDirectory("//root/foo"));
auto RemappedFS = vfs::RedirectingFileSystem::create(
- {}, /*UseExternalNames=*/false, *BaseFS);
+ {}, /*UseExternalNames=*/false, BaseFS);
auto OpenedF = RemappedFS->openFileForRead("a");
ASSERT_FALSE(OpenedF.getError());
@@ -1915,8 +1899,7 @@ TEST_F(VFSFromYAMLTest, ReturnsRequestedPathVFSMiss) {
}
TEST_F(VFSFromYAMLTest, ReturnsExternalPathVFSHit) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/realname", 0,
MemoryBuffer::getMemBuffer("contents of a"));
auto FS =
@@ -1955,7 +1938,7 @@ TEST_F(VFSFromYAMLTest, ReturnsExternalPathVFSHit) {
}
TEST_F(VFSFromYAMLTest, RootRelativeTest) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -1996,7 +1979,7 @@ TEST_F(VFSFromYAMLTest, RootRelativeTest) {
ASSERT_FALSE(S.getError());
EXPECT_EQ("//root/foo/bar/a", S->getName());
#else
- IntrusiveRefCntPtr<DummyFileSystem> LowerWindows(new DummyFileSystem());
+ auto LowerWindows = makeIntrusiveRefCnt<DummyFileSystem>();
LowerWindows->addDirectory("\\\\root\\foo\\bar");
LowerWindows->addRegularFile("\\\\root\\foo\\bar\\a");
FS = getFromYAMLString("{\n"
@@ -2018,8 +2001,7 @@ TEST_F(VFSFromYAMLTest, RootRelativeTest) {
}
TEST_F(VFSFromYAMLTest, ReturnsInternalPathVFSHit) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/realname", 0,
MemoryBuffer::getMemBuffer("contents of a"));
auto FS =
@@ -2058,7 +2040,7 @@ TEST_F(VFSFromYAMLTest, ReturnsInternalPathVFSHit) {
}
TEST_F(VFSFromYAMLTest, CaseInsensitive) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
"{ 'case-sensitive': 'false',\n"
@@ -2076,8 +2058,7 @@ TEST_F(VFSFromYAMLTest, CaseInsensitive) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> S = O->status("//root/XX");
@@ -2094,7 +2075,7 @@ TEST_F(VFSFromYAMLTest, CaseInsensitive) {
}
TEST_F(VFSFromYAMLTest, CaseSensitive) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
"{ 'case-sensitive': 'true',\n"
@@ -2112,8 +2093,7 @@ TEST_F(VFSFromYAMLTest, CaseSensitive) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> SS = O->status("//root/xx");
@@ -2126,7 +2106,7 @@ TEST_F(VFSFromYAMLTest, CaseSensitive) {
}
TEST_F(VFSFromYAMLTest, IllegalVFSFile) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
// invalid YAML at top-level
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString("{]", Lower);
@@ -2252,7 +2232,7 @@ TEST_F(VFSFromYAMLTest, IllegalVFSFile) {
}
TEST_F(VFSFromYAMLTest, UseExternalName) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/external/file");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -2304,7 +2284,7 @@ TEST_F(VFSFromYAMLTest, UseExternalName) {
}
TEST_F(VFSFromYAMLTest, MultiComponentPath) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/other");
// file in roots
@@ -2350,7 +2330,7 @@ TEST_F(VFSFromYAMLTest, MultiComponentPath) {
}
TEST_F(VFSFromYAMLTest, TrailingSlashes) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/other");
// file in roots
@@ -2369,7 +2349,7 @@ TEST_F(VFSFromYAMLTest, TrailingSlashes) {
}
TEST_F(VFSFromYAMLTest, DirectoryIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addDirectory("//root/foo/bar");
@@ -2399,8 +2379,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIteration) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2416,7 +2395,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
if (!supportsSameDirMultipleYAMLEntries())
GTEST_SKIP();
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/zab");
Lower->addDirectory("//root/baz");
Lower->addRegularFile("//root/zab/a");
@@ -2449,8 +2428,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2461,7 +2439,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/a");
Lower->addDirectory("//root/a/b");
Lower->addDirectory("//root/a/b/c");
@@ -2484,8 +2462,7 @@ TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2503,7 +2480,7 @@ TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
}
TEST_F(VFSFromYAMLTest, RelativePaths) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
std::error_code EC;
SmallString<128> CWD;
EC = llvm::sys::fs::current_path(CWD);
@@ -2557,7 +2534,7 @@ TEST_F(VFSFromYAMLTest, RelativePaths) {
}
TEST_F(VFSFromYAMLTest, NonFallthroughDirectoryIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addRegularFile("//root/a");
Lower->addRegularFile("//root/b");
@@ -2586,7 +2563,7 @@ TEST_F(VFSFromYAMLTest, NonFallthroughDirectoryIteration) {
}
TEST_F(VFSFromYAMLTest, DirectoryIterationWithDuplicates) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addRegularFile("//root/a");
Lower->addRegularFile("//root/b");
@@ -2614,7 +2591,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationWithDuplicates) {
}
TEST_F(VFSFromYAMLTest, DirectoryIterationErrorInVFSLayer) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2643,7 +2620,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationErrorInVFSLayer) {
}
TEST_F(VFSFromYAMLTest, GetRealPath) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//dir/");
Lower->addRegularFile("/foo");
Lower->addSymlink("/link");
@@ -2695,7 +2672,7 @@ TEST_F(VFSFromYAMLTest, GetRealPath) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectory) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2753,7 +2730,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectory) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthrough) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2835,7 +2812,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthrough) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthroughInvalid) {
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2872,7 +2849,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthroughInvalid) {
}
TEST_F(VFSFromYAMLTest, VirtualWorkingDirectory) {
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2928,7 +2905,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/a");
Lower->addRegularFile("//root/a/b");
@@ -2978,7 +2955,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest2) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLRawString(Buffer, Lower);
EXPECT_NE(FS.get(), nullptr);
}
@@ -3010,7 +2987,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest3) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLRawString(Buffer, Lower);
EXPECT_NE(FS.get(), nullptr);
}
@@ -3033,7 +3010,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTestHandleDirs) {
// We didn't add a single file - only directories.
EXPECT_EQ(Buffer.find("'type': 'file'"), std::string::npos);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/a");
Lower->addDirectory("//root/b");
Lower->addDirectory("//root/c");
@@ -3051,17 +3028,17 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTestHandleDirs) {
}
TEST_F(VFSFromYAMLTest, RedirectingWith) {
- IntrusiveRefCntPtr<DummyFileSystem> Both(new DummyFileSystem());
+ auto Both = makeIntrusiveRefCnt<DummyFileSystem>();
Both->addDirectory("//root/a");
Both->addRegularFile("//root/a/f");
Both->addDirectory("//root/b");
Both->addRegularFile("//root/b/f");
- IntrusiveRefCntPtr<DummyFileSystem> AOnly(new DummyFileSystem());
+ auto AOnly = makeIntrusiveRefCnt<DummyFileSystem>();
AOnly->addDirectory("//root/a");
AOnly->addRegularFile("//root/a/f");
- IntrusiveRefCntPtr<DummyFileSystem> BOnly(new DummyFileSystem());
+ auto BOnly = makeIntrusiveRefCnt<DummyFileSystem>();
BOnly->addDirectory("//root/b");
BOnly->addRegularFile("//root/b/f");
@@ -3166,8 +3143,7 @@ TEST_F(VFSFromYAMLTest, RedirectingWith) {
}
TEST(VFSFromRemappedFilesTest, Basic) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3176,7 +3152,7 @@ TEST(VFSFromRemappedFilesTest, Basic) {
{"//root/a/b/c", "//root/c"},
};
auto RemappedFS = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/false, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/false, BaseFS);
auto StatA = RemappedFS->status("//root/a/a");
auto StatB = RemappedFS->status("//root/a/b/c");
@@ -3194,8 +3170,7 @@ TEST(VFSFromRemappedFilesTest, Basic) {
}
TEST(VFSFromRemappedFilesTest, UseExternalNames) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3204,7 +3179,7 @@ TEST(VFSFromRemappedFilesTest, UseExternalNames) {
{"//root/a/b/c", "//root/c"},
};
auto RemappedFS = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/true, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/true, BaseFS);
auto StatA = RemappedFS->status("//root/a/a");
auto StatB = RemappedFS->status("//root/a/b/c");
@@ -3222,8 +3197,7 @@ TEST(VFSFromRemappedFilesTest, UseExternalNames) {
}
TEST(VFSFromRemappedFilesTest, LastMappingWins) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3232,9 +3206,9 @@ TEST(VFSFromRemappedFilesTest, LastMappingWins) {
{"//root/a", "//root/c"},
};
auto RemappedFSKeepName = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/false, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/false, BaseFS);
auto RemappedFSExternalName = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/true, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/true, BaseFS);
auto StatKeepA = RemappedFSKeepName->status("//root/a");
auto StatExternalA = RemappedFSExternalName->status("//root/a");
@@ -3416,7 +3390,7 @@ TEST(RedirectingFileSystemTest, ExternalPaths) {
BaseFS->setCurrentWorkingDirectory("/cwd");
auto CheckFS = makeIntrusiveRefCnt<InterceptorFS>(BaseFS);
auto FS = vfs::RedirectingFileSystem::create({}, /*UseExternalNames=*/false,
- *CheckFS);
+ CheckFS);
FS->status("/a/../b");
FS->openFileForRead("c");
@@ -3442,7 +3416,7 @@ TEST(RedirectingFileSystemTest, ExternalPaths) {
}
TEST(RedirectingFileSystemTest, Exists) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
@@ -3513,7 +3487,7 @@ TEST(RedirectingFileSystemTest, Exists) {
}
TEST(RedirectingFileSystemTest, ExistsFallback) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
@@ -3537,7 +3511,7 @@ TEST(RedirectingFileSystemTest, ExistsFallback) {
}
TEST(RedirectingFileSystemTest, ExistsRedirectOnly) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
diff --git a/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp b/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
index 6cd1a48..d4715be 100644
--- a/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
+++ b/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
@@ -44,19 +44,6 @@ protected:
delete MAM;
delete Context;
}
-
- void checkExpectedSpaceAndFreeRanges(
- DXILResourceBindingInfo::RegisterSpace &RegSpace, uint32_t ExpSpace,
- ArrayRef<uint32_t> ExpValues) {
- EXPECT_EQ(RegSpace.Space, ExpSpace);
- EXPECT_EQ(RegSpace.FreeRanges.size() * 2, ExpValues.size());
- unsigned I = 0;
- for (auto &R : RegSpace.FreeRanges) {
- EXPECT_EQ(R.LowerBound, ExpValues[I]);
- EXPECT_EQ(R.UpperBound, ExpValues[I + 1]);
- I += 2;
- }
- }
};
TEST_F(ResourceBindingAnalysisTest, TestTrivialCase) {
@@ -76,103 +63,16 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- // check that UAV has exactly one gap
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
- {0, 4, 6, UINT32_MAX});
-
- // check that other kinds of register spaces are all available
- for (auto RC :
- {ResourceClass::SRV, ResourceClass::CBuffer, ResourceClass::Sampler}) {
- DXILResourceBindingInfo::BindingSpaces &Spaces = DRBI.getBindingSpaces(RC);
- EXPECT_EQ(Spaces.RC, RC);
- EXPECT_EQ(Spaces.Spaces.size(), 0u);
- }
-}
-
-TEST_F(ResourceBindingAnalysisTest, TestManyBindings) {
- // cbuffer CB : register(b3) { int a; }
- // RWBuffer<float4> A[5] : register(u10, space20);
- // StructuredBuffer<int> B : register(t5);
- // RWBuffer<float> C : register(u5);
- // StructuredBuffer<int> D[5] : register(t0);
- // RWBuffer<float> E[2] : register(u2);
- // SamplerState S1 : register(s5, space2);
- // SamplerState S2 : register(s4, space2);
- StringRef Assembly = R"(
-%__cblayout_CB = type <{ i32 }>
-define void @main() {
-entry:
- %handleCB = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 3, i32 1, i32 0, i1 false, ptr null)
- %handleA = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 20, i32 10, i32 5, i32 0, i1 false, ptr null)
- %handleB = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleC = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleD = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 5, i32 4, i1 false, ptr null)
- %handleE = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 2, i32 0, i1 false, ptr null)
- %handleS1 = call target("dx.Sampler", 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleS2 = call target("dx.Sampler", 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
- ; duplicate binding for the same resource
- %handleD2 = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 5, i32 4, i1 false, ptr null)
- ret void
-}
- )";
-
- auto M = parseAsm(Assembly);
-
- DXILResourceBindingInfo &DRBI =
- MAM->getResult<DXILResourceBindingAnalysis>(*M);
-
- EXPECT_EQ(false, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
- // verify that consecutive bindings are merged
- // (SRVSpaces has only one free space range {6, UINT32_MAX}).
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {6, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 2u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
- {0, 1, 4, 4, 6, UINT32_MAX});
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 20,
- {0, 9, 15, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &CBufferSpaces =
- DRBI.getBindingSpaces(ResourceClass::CBuffer);
- EXPECT_EQ(CBufferSpaces.RC, ResourceClass::CBuffer);
- EXPECT_EQ(CBufferSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(CBufferSpaces.Spaces[0], 0,
- {0, 2, 4, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &SamplerSpaces =
- DRBI.getBindingSpaces(ResourceClass::Sampler);
- EXPECT_EQ(SamplerSpaces.RC, ResourceClass::Sampler);
- EXPECT_EQ(SamplerSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(SamplerSpaces.Spaces[0], 2,
- {0, 3, 6, UINT32_MAX});
}
-TEST_F(ResourceBindingAnalysisTest, TestUnboundedAndOverlap) {
- // StructuredBuffer<float> A[] : register(t5);
- // StructuredBuffer<float> B[3] : register(t0);
- // StructuredBuffer<float> C[] : register(t0, space2);
- // StructuredBuffer<float> D : register(t4, space2); /* overlapping */
+TEST_F(ResourceBindingAnalysisTest, TestOverlap) {
+ // StructuredBuffer<float> A[] : register(t0, space2);
+ // StructuredBuffer<float> B : register(t4, space2); /* overlapping */
StringRef Assembly = R"(
define void @main() {
entry:
- %handleA = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 -1, i32 10, i1 false, ptr null)
- %handleB = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 3, i32 0, i1 false, ptr null)
- %handleC = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 0, i32 -1, i32 100, i1 false, ptr null)
- %handleD = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
+ %handleA = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 0, i32 -1, i32 100, i1 false, ptr null)
+ %handleB = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
ret void
}
)";
@@ -184,13 +84,6 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(true, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 2u);
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {3, 4});
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[1], 2, {});
}
TEST_F(ResourceBindingAnalysisTest, TestExactOverlap) {
@@ -214,49 +107,6 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(true, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0,
- {0, 4, 6, UINT32_MAX});
-}
-
-TEST_F(ResourceBindingAnalysisTest, TestEndOfRange) {
- // RWBuffer<float> A : register(u4294967295); /* UINT32_MAX */
- // RWBuffer<float> B[10] : register(u4294967286, space1);
- // /* range (UINT32_MAX - 9, UINT32_MAX )*/
- // RWBuffer<float> C[10] : register(u2147483647, space2);
- // /* range (INT32_MAX, INT32_MAX + 9) */
- StringRef Assembly = R"(
-%__cblayout_CB = type <{ i32 }>
-define void @main() {
-entry:
- %handleA = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 -1, i32 1, i32 0, i1 false, ptr null)
- %handleB = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 1, i32 -10, i32 10, i32 50, i1 false, ptr null)
- %handleC = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 2147483647, i32 10, i32 100, i1 false, ptr null)
- ret void
-}
- )";
-
- auto M = parseAsm(Assembly);
-
- DXILResourceBindingInfo &DRBI =
- MAM->getResult<DXILResourceBindingAnalysis>(*M);
-
- EXPECT_EQ(false, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 3u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0, {0, UINT32_MAX - 1});
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 1, {0, UINT32_MAX - 10});
- checkExpectedSpaceAndFreeRanges(
- UAVSpaces.Spaces[2], 2,
- {0, (uint32_t)INT32_MAX - 1, (uint32_t)INT32_MAX + 10, UINT32_MAX});
}
TEST_F(ResourceBindingAnalysisTest, TestImplicitFlag) {
@@ -275,15 +125,8 @@ entry:
DXILResourceBindingInfo &DRBI =
MAM->getResult<DXILResourceBindingAnalysis>(*M);
- EXPECT_EQ(true, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 100,
- {0, 4, 6, UINT32_MAX});
+ EXPECT_TRUE(DRBI.hasImplicitBinding());
+ EXPECT_FALSE(DRBI.hasOverlappingBinding());
}
} // namespace
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
index 6214ea3..9e06c42 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
@@ -287,6 +287,42 @@ TEST_F(VPVerifierTest, BlockOutsideRegionWithParent) {
#endif
}
+TEST_F(VPVerifierTest, NonHeaderPHIInHeader) {
+ VPlan &Plan = getPlan();
+ VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0));
+ auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {});
+ auto *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV});
+
+ VPBasicBlock *VPBB1 = Plan.getEntry();
+ VPBasicBlock *VPBB2 = Plan.createVPBasicBlock("header");
+
+ VPBB2->appendRecipe(CanIV);
+
+ PHINode *PHINode = PHINode::Create(Type::getInt32Ty(C), 2);
+ auto *IRPhi = new VPIRPhi(*PHINode);
+ VPBB2->appendRecipe(IRPhi);
+ VPBB2->appendRecipe(BranchOnCond);
+
+ VPRegionBlock *R1 = Plan.createVPRegionBlock(VPBB2, VPBB2, "R1");
+ VPBlockUtils::connectBlocks(VPBB1, R1);
+ VPBlockUtils::connectBlocks(R1, Plan.getScalarHeader());
+
+#if GTEST_HAS_STREAM_REDIRECTION
+ ::testing::internal::CaptureStderr();
+#endif
+ EXPECT_FALSE(verifyVPlanIsValid(Plan));
+#if GTEST_HAS_STREAM_REDIRECTION
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ EXPECT_STREQ(
+ "Found non-header PHI recipe in header VPBB: IR <badref> = phi i32 \n",
+ ::testing::internal::GetCapturedStderr().c_str());
+#else
+ EXPECT_STREQ("Found non-header PHI recipe in header VPBB",
+ ::testing::internal::GetCapturedStderr().c_str());
+#endif
+#endif
+}
+
class VPIRVerifierTest : public VPlanTestIRBase {};
TEST_F(VPIRVerifierTest, testVerifyIRPhi) {
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index fa38d01..6f72b51 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -250,29 +250,38 @@ void InstrInfoEmitter::emitOperandNameMappings(
// Map of operand names to their ID.
MapVector<StringRef, unsigned> OperandNameToID;
- /// The keys of this map is a map which have OpName ID values as their keys
- /// and instruction operand indices as their values. The values of this map
- /// are lists of instruction names. This map helps to unique entries among
+ /// A key in this map is a vector mapping OpName ID values to instruction
+ /// operand indices, or -1 where an instruction lacks that operand (trailing
+ /// -1 values are omitted and filled back in when the table is emitted). The
+ /// corresponding value is the index of that row in the emitted OperandMap
+ /// table. This map helps to unique entries among instructions that have
+ /// identical OpName -> Operand index mappings.
- std::map<std::map<unsigned, unsigned>, std::vector<StringRef>> OperandMap;
+ MapVector<SmallVector<int>, unsigned> OperandMap;
// Max operand index seen.
unsigned MaxOperandNo = 0;
// Fixed/Predefined instructions do not have UseNamedOperandTable enabled, so
- // we can just skip them.
+ // add a dummy map entry for them.
+ OperandMap.try_emplace({}, 0);
+ unsigned FirstTargetVal = TargetInstructions.front()->EnumVal;
+ SmallVector<unsigned> InstructionIndex(FirstTargetVal, 0);
for (const CodeGenInstruction *Inst : TargetInstructions) {
- if (!Inst->TheDef->getValueAsBit("UseNamedOperandTable"))
+ if (!Inst->TheDef->getValueAsBit("UseNamedOperandTable")) {
+ InstructionIndex.push_back(0);
continue;
- std::map<unsigned, unsigned> OpList;
+ }
+ SmallVector<int> OpList;
for (const auto &Info : Inst->Operands) {
unsigned ID =
OperandNameToID.try_emplace(Info.Name, OperandNameToID.size())
.first->second;
+ OpList.resize(std::max((unsigned)OpList.size(), ID + 1), -1);
OpList[ID] = Info.MIOperandNo;
MaxOperandNo = std::max(MaxOperandNo, Info.MIOperandNo);
}
- OperandMap[OpList].push_back(Inst->TheDef->getName());
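+ // Unique the OpName -> operand index rows and record which row this
+ // instruction's opcode maps to via InstructionIndex.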
+ auto [It, Inserted] =
+ OperandMap.try_emplace(std::move(OpList), OperandMap.size());
+ InstructionIndex.push_back(It->second);
}
const size_t NumOperandNames = OperandNameToID.size();
@@ -302,28 +311,22 @@ void InstrInfoEmitter::emitOperandNameMappings(
StringRef Type = MaxOperandNo <= INT8_MAX ? "int8_t" : "int16_t";
OS << " static constexpr " << Type << " OperandMap[][" << NumOperandNames
<< "] = {\n";
- for (const auto &Entry : OperandMap) {
- const std::map<unsigned, unsigned> &OpList = Entry.first;
-
+ for (const auto &[OpList, _] : OperandMap) {
// Emit a row of the OperandMap table.
OS << " {";
- for (unsigned ID = 0; ID < NumOperandNames; ++ID) {
- auto Iter = OpList.find(ID);
- OS << (Iter != OpList.end() ? (int)Iter->second : -1) << ", ";
- }
+ for (unsigned ID = 0; ID < NumOperandNames; ++ID)
+ OS << (ID < OpList.size() ? OpList[ID] : -1) << ", ";
OS << "},\n";
}
OS << " };\n";
- OS << " switch(Opcode) {\n";
- for (const auto &[TableIndex, Entry] : enumerate(OperandMap)) {
- for (StringRef Name : Entry.second)
- OS << " case " << Namespace << "::" << Name << ":\n";
- OS << " return OperandMap[" << TableIndex
- << "][static_cast<unsigned>(Name)];\n";
- }
- OS << " default: return -1;\n";
- OS << " }\n";
+ Type = OperandMap.size() <= UINT8_MAX + 1 ? "uint8_t" : "uint16_t";
+ OS << " static constexpr " << Type << " InstructionIndex[] = {";
+ for (auto [TableIndex, Entry] : enumerate(InstructionIndex))
+ OS << (TableIndex % 16 == 0 ? "\n " : " ") << Entry << ',';
+ OS << "\n };\n";
+
+ OS << " return OperandMap[InstructionIndex[Opcode]][(unsigned)Name];\n";
} else {
// There are no operands, so no need to emit anything
OS << " return -1;\n";
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index c2829c1..98b4b5d 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -165,12 +165,10 @@ ASM_FUNCTION_AARCH64_DARWIN_RE = re.compile(
)
ASM_FUNCTION_ARM_DARWIN_RE = re.compile(
- r"@[ \t]--[ \t]Begin[ \t]function[ \t](?P<func>[^ \t]+?)\n"
- r"^[ \t]*\.globl[ \t]*_(?P=func)[ \t]*"
+ r"^[ \t]*\.globl[ \t]*_(?P<func>[^ \t]+)[ \t]*\@[ \t]*--[ \t]Begin[ \t]function[ \t](?P=func)\n"
r"(?P<directives>.*?)"
- r"^_(?P=func):\n[ \t]*"
- r"(?P<body>.*?)"
- r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+ r"^_(?P=func):.*?\n"
+ r"(?P<body>.*?)(?=^[ \t]*@[ \t]--[ \t]End[ \t]function)",
flags=(re.M | re.S),
)
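A small self-contained check of the rewritten pattern; the assembly snippet is invented rather than taken from an existing test:

    import re

    # Copy of the revised ASM_FUNCTION_ARM_DARWIN_RE, for illustration only.
    ARM_DARWIN_RE = re.compile(
        r"^[ \t]*\.globl[ \t]*_(?P<func>[^ \t]+)[ \t]*\@[ \t]*--[ \t]Begin[ \t]function[ \t](?P=func)\n"
        r"(?P<directives>.*?)"
        r"^_(?P=func):.*?\n"
        r"(?P<body>.*?)(?=^[ \t]*@[ \t]--[ \t]End[ \t]function)",
        flags=(re.M | re.S),
    )

    asm = (
        "\t.globl\t_foo @ -- Begin function foo\n"
        "\t.p2align\t2\n"
        "_foo:\n"
        "\tmov\tr0, #0\n"
        "\tbx\tlr\n"
        "\t@ -- End function\n"
    )

    m = ARM_DARWIN_RE.search(asm)
    assert m.group("func") == "foo"
    assert "bx\tlr" in m.group("body")

The function name is now anchored on the .globl line rather than on a preceding "Begin function" comment line, and the body capture stops at a lookahead for the end-of-function comment instead of consuming it.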
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
index 218e36e..e3182b0 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
@@ -46,6 +46,7 @@ static_library("bugprone") {
"IncorrectRoundingsCheck.cpp",
"InfiniteLoopCheck.cpp",
"IntegerDivisionCheck.cpp",
+ "InvalidEnumDefaultInitializationCheck.cpp",
"LambdaFunctionNameCheck.cpp",
"MacroParenthesesCheck.cpp",
"MacroRepeatedSideEffectsCheck.cpp",
diff --git a/llvm/utils/gn/secondary/lldb/source/Plugins/SymbolFile/DWARF/BUILD.gn b/llvm/utils/gn/secondary/lldb/source/Plugins/SymbolFile/DWARF/BUILD.gn
index 566195e..4e63aa8 100644
--- a/llvm/utils/gn/secondary/lldb/source/Plugins/SymbolFile/DWARF/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/source/Plugins/SymbolFile/DWARF/BUILD.gn
@@ -69,6 +69,7 @@ static_library("DWARF") {
"SymbolFileDWARF.cpp",
"SymbolFileDWARFDebugMap.cpp",
"SymbolFileDWARFDwo.cpp",
+ "SymbolFileWasm.cpp",
"UniqueDWARFASTType.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
index 3d08c3f..d394923 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
@@ -8,6 +8,7 @@ static_library("Analysis") {
"//llvm/include/llvm/Config:config",
"//llvm/lib/BinaryFormat",
"//llvm/lib/IR",
+ "//llvm/lib/Frontend/HLSL",
"//llvm/lib/ProfileData",
"//llvm/lib/Support",
"//llvm/lib/TargetParser",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
index 4c1c613..fce564e 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
@@ -6,6 +6,7 @@ static_library("HLSL") {
]
sources = [
"CBuffer.cpp",
+ "HLSLBinding.cpp",
"HLSLResource.cpp",
"HLSLRootSignature.cpp",
"RootSignatureMetadata.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
index e1d740a..b32b55f 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
@@ -42,6 +42,7 @@ unittest("CodeGenTests") {
"ScalableVectorMVTsTest.cpp",
"SchedBoundary.cpp",
"SelectionDAGAddressAnalysisTest.cpp",
+ "SelectionDAGNodeConstructionTest.cpp",
"SelectionDAGPatternMatchTest.cpp",
"TargetOptionsTest.cpp",
"TestAsmPrinter.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
index c29277c..6890c48 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
@@ -13,6 +13,7 @@ unittest("LLVMFrontendTests") {
"//llvm/lib/Testing/Support",
]
sources = [
+ "HLSLBindingTest.cpp",
"HLSLRootSignatureDumpTest.cpp",
"HLSLRootSignatureRangesTest.cpp",
"OpenACCTest.cpp",
diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 1bd5ba8..7290977 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -247,6 +247,9 @@ class Test:
# and will be honored when the test result is supplied.
self.xfails = []
+ # Exclude this test if it is expected to fail (XFAIL).
+ self.exclude_xfail = False
+
# If true, ignore all items in self.xfails.
self.xfail_not = False
diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
index 73db67a..e7cd707 100644
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -2175,6 +2175,8 @@ def parseIntegratedTestScript(test, additional_parsers=[], require_script=True):
assert parsed["DEFINE:"] == script
assert parsed["REDEFINE:"] == script
test.xfails += parsed["XFAIL:"] or []
+ if test.exclude_xfail and test.isExpectedToFail():
+ return lit.Test.Result(Test.EXCLUDED, "excluding XFAIL tests")
test.requires += parsed["REQUIRES:"] or []
test.unsupported += parsed["UNSUPPORTED:"] or []
if parsed["ALLOW_RETRIES:"]:
diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
index 3292554..e889515 100644
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -304,6 +304,16 @@ def parse_args():
default=os.environ.get("LIT_XFAIL_NOT", ""),
)
selection_group.add_argument(
+ "--exclude-xfail",
+ help="exclude XFAIL tests (unless they are in the --xfail-not list). "
+ "Note: This option is implemented in "
+ "lit.TestRunner.parseIntegratedTestScript and so will have no effect on "
+ "test formats that do not call that and do not implement the option "
+ "separately.",
+ default=False,
+ action="store_true",
+ )
+ selection_group.add_argument(
"--num-shards",
dest="numShards",
metavar="M",
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index 0939838..9650a0e 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -240,6 +240,8 @@ def mark_xfail(selected_tests, opts):
t.xfails += "*"
if test_file in opts.xfail_not or test_full_name in opts.xfail_not:
t.xfail_not = True
+ if opts.exclude_xfail:
+ t.exclude_xfail = True
def mark_excluded(discovered_tests, selected_tests):
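Taken together, these lit hunks thread the new option end to end: cl_arguments.py registers --exclude-xfail, mark_xfail() above copies it onto each selected test, and parseIntegratedTestScript() returns an EXCLUDED result once the test's XFAIL directives are known, unless --xfail-not (or LIT_XFAIL_NOT) pulls the test back out. A rough sketch of the resulting decision, ignoring feature-conditional XFAIL expressions:

    # Simplified model; the real logic lives in lit.main.mark_xfail and
    # lit.TestRunner.parseIntegratedTestScript, and Test.isExpectedToFail()
    # also evaluates conditional XFAIL expressions against available features.
    def is_excluded(test_name, xfails, xfail_not, exclude_xfail):
        expected_to_fail = "*" in xfails or test_name in xfails
        if test_name in xfail_not:
            expected_to_fail = False
        return exclude_xfail and expected_to_fail

    assert is_excluded("b/test.txt", {"b/test.txt"}, set(), exclude_xfail=True)
    assert not is_excluded("true-xfail.txt", {"true-xfail.txt"},
                           {"true-xfail.txt"}, exclude_xfail=True)

A typical invocation (the paths are placeholders) would be: llvm-lit --xfail 'flaky.txt' --xfail-not 'keep.txt' --exclude-xfail path/to/tests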
diff --git a/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt b/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt
new file mode 100644
index 0000000..6fdecd6
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt
@@ -0,0 +1,2 @@
+# XFAIL: this-does-not-exist
+# RUN: true
\ No newline at end of file
diff --git a/llvm/utils/lit/tests/xfail-cl.py b/llvm/utils/lit/tests/xfail-cl.py
index ef1bb04..f1e0e33 100644
--- a/llvm/utils/lit/tests/xfail-cl.py
+++ b/llvm/utils/lit/tests/xfail-cl.py
@@ -5,6 +5,18 @@
# RUN: %{inputs}/xfail-cl \
# RUN: | FileCheck --check-prefix=CHECK-FILTER %s
+# RUN: %{lit} --xfail 'false.txt;false2.txt;top-level-suite :: b :: test.txt' \
+# RUN: --exclude-xfail \
+# RUN: %{inputs}/xfail-cl \
+# RUN: | FileCheck --check-prefixes=CHECK-EXCLUDED,CHECK-EXCLUDED-NOOVERRIDE %s
+
+# RUN: %{lit} --xfail 'false.txt;false2.txt;top-level-suite :: b :: test.txt' \
+# RUN: --xfail-not 'true-xfail.txt' \
+# RUN: --exclude-xfail \
+# RUN: %{inputs}/xfail-cl \
+# RUN: | FileCheck --check-prefixes=CHECK-EXCLUDED,CHECK-EXCLUDED-OVERRIDE %s
+
+
# RUN: env LIT_XFAIL='false.txt;false2.txt;top-level-suite :: b :: test.txt' \
# RUN: LIT_XFAIL_NOT='true-xfail.txt;top-level-suite :: a :: test-xfail.txt' \
# RUN: %{lit} %{inputs}/xfail-cl \
@@ -23,7 +35,7 @@
# END.
-# CHECK-FILTER: Testing: 10 tests, {{[0-9]*}} workers
+# CHECK-FILTER: Testing: 11 tests, {{[0-9]*}} workers
# CHECK-FILTER-DAG: {{^}}PASS: top-level-suite :: a :: test.txt
# CHECK-FILTER-DAG: {{^}}XFAIL: top-level-suite :: b :: test.txt
# CHECK-FILTER-DAG: {{^}}XFAIL: top-level-suite :: a :: false.txt
@@ -37,3 +49,17 @@
# CHECK-OVERRIDE: Testing: 1 tests, {{[0-9]*}} workers
# CHECK-OVERRIDE: {{^}}PASS: top-level-suite :: true-xfail.txt
+
+# CHECK-EXCLUDED: Testing: 11 tests, {{[0-9]*}} workers
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: a :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: a :: test-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: a :: test.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: test-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: test.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: false2.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: true-xfail-conditionally.txt
+# CHECK-EXCLUDED-NOOVERRIDE-DAG: {{^}}EXCLUDED: top-level-suite :: true-xfail.txt
+# CHECK-EXCLUDED-OVERRIDE-DAG: {{^}}PASS: top-level-suite :: true-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: true.txt
diff --git a/llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py b/llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py
new file mode 100644
index 0000000..c48503e
--- /dev/null
+++ b/llvm/utils/mlgo-utils/IR2Vec/generateTriplets.py
@@ -0,0 +1,304 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""IR2Vec Triplet Generator
+
+Generates IR2Vec triplets by applying random optimization levels to LLVM IR files
+and extracting triplets using llvm-ir2vec. Automatically generates preprocessed
+files: entity2id.txt, relation2id.txt, and train2id.txt.
+
+Usage:
+ python generateTriplets.py <llvm_build_dir> <num_optimizations> <ll_file_list> <output_dir>
+"""
+
+import argparse
+import logging
+import os
+import random
+import subprocess
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from pathlib import Path
+from typing import List, Set, Tuple
+
+# Configuration
+OPT_LEVELS = ["O0", "O1", "O2", "O3", "Os", "Oz"]
+DEFAULT_MAX_WORKERS = 100
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: Change this to a dataclass with slots
+# when Python 3.10+ is the minimum version
+# https://docs.python.org/3/library/dataclasses.html#dataclasses.dataclass
+class TripletResult:
+ """Result from processing a single LLVM IR file"""
+
+ __slots__ = ["triplets", "max_relation"]
+
+ def __init__(self, triplets: Set[str], max_relation: int):
+ self.triplets = triplets
+ self.max_relation = max_relation
+
+
+class IR2VecTripletGenerator:
+ """Main class for generating IR2Vec triplets"""
+
+ def __init__(
+ self,
+ llvm_build_dir: Path,
+ num_optimizations: int,
+ output_dir: Path,
+ max_workers: int = DEFAULT_MAX_WORKERS,
+ ):
+ self.llvm_build_dir = llvm_build_dir
+ self.num_optimizations = num_optimizations
+ self.output_dir = output_dir
+ self.max_workers = max_workers
+
+ # Tool paths
+ self.opt_binary = os.path.join(llvm_build_dir, "bin", "opt")
+ self.ir2vec_binary = os.path.join(llvm_build_dir, "bin", "llvm-ir2vec")
+
+ self._validate_setup()
+
+ # Create output directory if it doesn't exist
+ self.output_dir.mkdir(parents=True, exist_ok=True)
+
+ def _validate_setup(self):
+ """Validate that all required tools and paths exist"""
+ if not self.llvm_build_dir.exists():
+ raise FileNotFoundError(
+ f"LLVM build directory not found: {self.llvm_build_dir}"
+ )
+
+ if not os.path.isfile(self.opt_binary) or not os.access(
+ self.opt_binary, os.X_OK
+ ):
+ raise FileNotFoundError(
+ f"opt binary not found or not executable: {self.opt_binary}"
+ )
+
+ if not os.path.isfile(self.ir2vec_binary) or not os.access(
+ self.ir2vec_binary, os.X_OK
+ ):
+ raise FileNotFoundError(
+ f"llvm-ir2vec binary not found or not executable: {self.ir2vec_binary}"
+ )
+
+ if not (1 <= self.num_optimizations <= len(OPT_LEVELS)):
+ raise ValueError(
+ f"Number of optimizations must be between 1 and {len(OPT_LEVELS)}"
+ )
+
+ def _select_optimization_levels(self) -> List[str]:
+ """Select unique random optimization levels"""
+ return random.sample(OPT_LEVELS, self.num_optimizations)
+
+ def _process_single_file(self, input_file: Path) -> TripletResult:
+ """Process a single LLVM IR file with multiple optimization levels"""
+ all_triplets = set()
+ max_relation = 1
+ opt_levels = self._select_optimization_levels()
+
+ for opt_level in opt_levels:
+ triplets, file_max_relation = self._run_pipeline(input_file, opt_level)
+ if triplets:
+ all_triplets.update(triplets)
+ max_relation = max(max_relation, file_max_relation)
+ logger.debug(
+ f"Generated {len(triplets)} triplets for {input_file} with {opt_level}"
+ )
+
+ return TripletResult(all_triplets, max_relation)
+
+ def _run_pipeline(self, input_file: Path, opt_level: str) -> Tuple[Set[str], int]:
+ """Run opt | llvm-ir2vec pipeline using subprocess pipes."""
+ try:
+ # Run opt first
+ opt_proc = subprocess.Popen(
+ [self.opt_binary, f"-{opt_level}", str(input_file), "-o", "-"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ )
+
+ # Run llvm-ir2vec with opt's output as input
+ ir2vec_proc = subprocess.Popen(
+ [self.ir2vec_binary, "--mode=triplets", "-", "-o", "-"],
+ stdin=opt_proc.stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ )
+
+ opt_proc.stdout.close()
+ stdout, _ = ir2vec_proc.communicate()
+ opt_proc.wait()
+
+ # Check if either process failed
+ if opt_proc.returncode != 0 or ir2vec_proc.returncode != 0:
+ return set(), 1
+
+ return self._parse_triplet_output(stdout)
+ except (subprocess.SubprocessError, OSError):
+ return set(), 1
+
+ def _parse_triplet_output(self, output: str) -> Tuple[Set[str], int]:
+ """Parse triplet output and extract max relation"""
+ if not output.strip():
+ return set(), 1
+
+ lines = output.strip().split("\n")
+ max_relation = 1
+
+ # Extract max relation from metadata line
+ if lines and lines[0].startswith("MAX_RELATION="):
+ max_relation = int(lines[0].split("=")[1])
+ lines = lines[1:]
+
+ # Remove duplicate triplets by converting to a set
+ return set(lines), max_relation
+
+ def generate_triplets(self, file_list: Path) -> None:
+ """Main method to generate triplets from a list of LLVM IR files"""
+ input_files = self._read_file_list(file_list)
+ logger.info(
+ f"Processing {len(input_files)} files with {self.num_optimizations} "
+ f"optimization levels using {self.max_workers} workers"
+ )
+
+ all_triplets = set()
+ global_max_relation = 1
+
+ with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+ future_to_file = {
+ executor.submit(self._process_single_file, file): file
+ for file in input_files
+ }
+
+ for future in as_completed(future_to_file):
+ try:
+ result = future.result()
+ all_triplets.update(result.triplets)
+ global_max_relation = max(global_max_relation, result.max_relation)
+ except (subprocess.SubprocessError, OSError, ValueError) as e:
+ file_path = future_to_file[future]
+ logger.error(f"Error processing {file_path}: {e}")
+
+ self._generate_output_files(all_triplets, global_max_relation)
+ logger.info("Processing completed successfully")
+
+ def _read_file_list(self, file_list: Path) -> List[Path]:
+ """Read and validate the list of input files"""
+ input_files = []
+ with open(file_list, "r") as f:
+ for line_num, line in enumerate(f, 1):
+ if line := line.strip():
+ file_path = Path(line)
+ if file_path.exists():
+ input_files.append(file_path)
+ else:
+ logger.warning(f"File not found (line {line_num}): {file_path}")
+
+ if not input_files:
+ raise ValueError("No valid input files found")
+ return input_files
+
+ def _generate_output_files(self, all_triplets: Set[str], max_relation: int) -> None:
+ """Generate the final output files"""
+ logger.info(f"Generating output files with {len(all_triplets)} unique triplets")
+
+ # Write all output files -- train2id.txt, entity2id.txt, relation2id.txt
+ train2id_file = os.path.join(self.output_dir, "train2id.txt")
+ entity2id_file = os.path.join(self.output_dir, "entity2id.txt")
+ relation2id_file = os.path.join(self.output_dir, "relation2id.txt")
+
+ with open(train2id_file, "w") as f:
+ f.write(f"{len(all_triplets)}\n")
+ f.writelines(f"{triplet}\n" for triplet in all_triplets)
+
+ self._generate_entity2id(entity2id_file)
+ self._generate_relation2id(relation2id_file, max_relation)
+
+ def _generate_entity2id(self, output_file: Path) -> None:
+ """Generate entity2id.txt using llvm-ir2vec"""
+ subprocess.run(
+ [str(self.ir2vec_binary), "--mode=entities", "-o", str(output_file)],
+ check=True,
+ capture_output=True,
+ )
+
+ def _generate_relation2id(self, output_file: Path, max_relation: int) -> None:
+ """Generate relation2id.txt from max relation"""
+ max_relation = max(max_relation, 1) # At least Type and Next relations
+ num_relations = max_relation + 1
+
+ with open(output_file, "w") as f:
+ f.write(f"{num_relations}\n")
+ f.write("Type\t0\n")
+ f.write("Next\t1\n")
+ f.writelines(f"Arg{i-2}\t{i}\n" for i in range(2, num_relations))
+
+
+def main():
+ """Main entry point"""
+ parser = argparse.ArgumentParser(
+ description="Generate IR2Vec triplets from LLVM IR files",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+
+ parser.add_argument(
+ "llvm_build_dir", type=Path, help="Path to LLVM build directory"
+ )
+ parser.add_argument(
+ "num_optimizations",
+ type=int,
+ help="Number of optimization levels to apply (1-6)",
+ )
+ parser.add_argument(
+ "ll_file_list",
+ type=Path,
+ help="File containing list of LLVM IR files to process",
+ )
+ parser.add_argument(
+ "output_dir", type=Path, help="Output directory for generated files"
+ )
+ parser.add_argument(
+ "-j",
+ "--max-workers",
+ type=int,
+ default=DEFAULT_MAX_WORKERS,
+ help=f"Maximum number of parallel workers (default: {DEFAULT_MAX_WORKERS})",
+ )
+ parser.add_argument(
+ "-v", "--verbose", action="store_true", help="Enable debug logging"
+ )
+ parser.add_argument(
+ "-q", "--quiet", action="store_true", help="Suppress all output except errors"
+ )
+
+ args = parser.parse_args()
+
+ # Configure logging
+ level = (
+ logging.ERROR
+ if args.quiet
+ else (logging.DEBUG if args.verbose else logging.INFO)
+ )
+ logging.basicConfig(
+ level=level,
+ format="[%(asctime)s] %(levelname)s: %(message)s",
+ datefmt="%H:%M:%S",
+ )
+
+ generator = IR2VecTripletGenerator(
+ args.llvm_build_dir,
+ args.num_optimizations,
+ args.output_dir,
+ args.max_workers,
+ )
+ generator.generate_triplets(args.ll_file_list)
+
+
+if __name__ == "__main__":
+ main()
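A hypothetical driver for the new script, assuming it is importable as a module and that the paths below exist; the equivalent command line, per the docstring, is python generateTriplets.py /path/to/llvm/build 3 ll_files.txt triplets_out -j 32:

    from pathlib import Path

    from generateTriplets import IR2VecTripletGenerator

    # Picks 3 distinct levels from O0..Oz per input file, fans the work out
    # over 32 threads, and writes train2id.txt, entity2id.txt and
    # relation2id.txt into the output directory.
    gen = IR2VecTripletGenerator(
        llvm_build_dir=Path("/path/to/llvm/build"),
        num_optimizations=3,
        output_dir=Path("triplets_out"),
        max_workers=32,
    )
    gen.generate_triplets(Path("ll_files.txt"))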
diff --git a/llvm/utils/release/github-upload-release.py b/llvm/utils/release/github-upload-release.py
index 90c222d..5ed037ee 100755
--- a/llvm/utils/release/github-upload-release.py
+++ b/llvm/utils/release/github-upload-release.py
@@ -45,19 +45,39 @@ def create_release(repo, release, tag=None, name=None, message=None):
# Note that these lines are not length limited because if we do so, GitHub
# assumes that should be how it is laid out on the page. We want GitHub to
# do the reflowing for us instead.
+ #
+ # Once all the automatic binary builds have completed, the HTML comments
+ # with UPPERCASE markers in them will be removed to reveal the download
+ # links later. Other lines are surrounded in <!-- --> for release uploaders
+ # to manually uncomment when they upload that package.
message = dedent(
"""\
-LLVM {release} Release
+## LLVM {release} Release
-## Package Types
+<!-- AUTOMATIC_DOWNLOAD_LINKS_BEGIN
+* [Linux x86_64](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-Linux-X64.tar.xz) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-Linux-X64.tar.xz.jsonl))
+* [Linux Arm64](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-Linux-ARM64.tar.xz) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-Linux-ARM64.tar.xz.jsonl))
+AUTOMATIC_DOWNLOAD_LINKS_END -->
+<!-- * [Linux Armv7-a](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-armv7a-linux-gnueabihf.tar.gz) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-armv7a-linux-gnueabihf.tar.gz.sig)) -->
-Each platform has one binary release package. The file name starts with either `LLVM-` or `clang+llvm-` and ends with the platform's name. For example, `LLVM-{release}-Linux-ARM64.tar.xz` contains LLVM binaries for Arm64 Linux.
+<!-- AUTOMATIC_DOWNLOAD_LINKS_BEGIN
+* [macOS Apple Silicon](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-macOS-ARM64.tar.xz) (ARM64) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-macOS-ARM64.tar.xz.jsonl))
+* [macOS Intel](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-macOS-X64.tar.xz) (x86-64) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-macOS-X64.tar.xz.jsonl))
+AUTOMATIC_DOWNLOAD_LINKS_END -->
+
+<!-- * Windows x64 (64-bit): [installer](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-win64.exe) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-win64.exe.sig)), [archive](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-x86_64-pc-windows-msvc.tar.xz) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-x86_64-pc-windows-msvc.tar.xz.sig)) -->
+<!-- * Windows x86 (32-bit): [installer](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-win32.exe) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-win32.exe.sig)) -->
+<!-- * Windows on Arm (ARM64): [installer](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-woa64.exe) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/LLVM-{release}-woa64.exe.sig)), [archive](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-aarch64-pc-windows-msvc.tar.xz) ([signature](https://github.com/llvm/llvm-project/releases/download/llvmorg-{release}/clang+llvm-{release}-aarch64-pc-windows-msvc.tar.xz.sig)) -->
-Except for Windows. Where `LLVM-*.exe` is an installer intended for using LLVM as a toolchain and `clang+llvm-` contains the contents of the installer, plus libraries and tools not normally used in a toolchain. You most likely want the `LLVM-` installer, unless you are developing software which itself uses LLVM, in which case choose `clang+llvm-`.
+Download links will appear here once builds have completed. <!-- AUTOMATIC_DOWNLOAD_LINKS_PLACEHOLDER -->
+
+For any other variants of platform and architecture, check the full list of release packages at the bottom of this release page. If you do not find a release package for your platform, you may be able to find a community built package on the LLVM Discourse forum thread for this release. Remember that these are built by volunteers and may not always be available. If you rely on a platform or configuration that is not one of the defaults, we suggest you use the binaries that your platform provides, or build your own release packages.
+
+## Package Types
-If you do not find a release package for your platform, you may be able to find a community built package on the LLVM Discourse forum thread for this release. Remember that these are built by volunteers and may not always be available.
+Each platform has one binary release package. The file name starts with either `LLVM-` or `clang+llvm-` and ends with the platform's name. For example, `LLVM-{release}-Linux-ARM64.tar.xz` contains LLVM binaries for Arm64 Linux.
-If you rely on a platform or configuration that is not one of the defaults, we suggest you use the binaries that your platform provides, or build your own release packages.
+Except for Windows, where `LLVM-*.exe` is an installer intended for using LLVM as a toolchain, and the archive `clang+llvm-` contains the contents of the installer plus libraries and tools not normally used in a toolchain. You most likely want the `LLVM-` installer, unless you are developing software which itself uses LLVM, in which case choose `clang+llvm-`.
In addition, source archives are available:
* `<sub-project>-{release}.src.tar.xz` are archives of the sources of specific sub-projects of `llvm-project` (except for `test-suite` which is an archive of the [LLVM Test Suite](https://github.com/llvm/llvm-test-suite)).
@@ -95,9 +115,35 @@ def upload_files(repo, release, files):
print("Done")
+def uncomment_download_links(repo, release):
+ release = repo.get_release("llvmorg-{}".format(release))
+
+ new_message = []
+ to_remove = [
+ "AUTOMATIC_DOWNLOAD_LINKS_BEGIN",
+ "AUTOMATIC_DOWNLOAD_LINKS_END",
+ "AUTOMATIC_DOWNLOAD_LINKS_PLACEHOLDER",
+ ]
+ for line in release.body.splitlines():
+ for comment in to_remove:
+ if comment in line:
+ break
+ else:
+ new_message.append(line)
+
+ release.update_release(
+ name=release.title,
+ message="\n".join(new_message),
+ draft=release.draft,
+ prerelease=release.prerelease,
+ )
+
+
parser = argparse.ArgumentParser()
parser.add_argument(
- "command", type=str, choices=["create", "upload", "check-permissions"]
+ "command",
+ type=str,
+ choices=["create", "upload", "check-permissions", "uncomment_download_links"],
)
# All args
@@ -137,3 +183,5 @@ if args.command == "create":
create_release(llvm_repo, args.release)
if args.command == "upload":
upload_files(llvm_repo, args.release, args.files)
+if args.command == "uncomment_download_links":
+ uncomment_download_links(llvm_repo, args.release)
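A toy illustration of the marker-stripping loop in uncomment_download_links; the message body below is invented, not a real release description:

    body = (
        "<!-- AUTOMATIC_DOWNLOAD_LINKS_BEGIN\n"
        "* [Linux x86_64](https://example.invalid/LLVM-X.Y.Z-Linux-X64.tar.xz)\n"
        "AUTOMATIC_DOWNLOAD_LINKS_END -->\n"
        "Download links will appear here once builds have completed. "
        "<!-- AUTOMATIC_DOWNLOAD_LINKS_PLACEHOLDER -->"
    )
    to_remove = [
        "AUTOMATIC_DOWNLOAD_LINKS_BEGIN",
        "AUTOMATIC_DOWNLOAD_LINKS_END",
        "AUTOMATIC_DOWNLOAD_LINKS_PLACEHOLDER",
    ]
    # Dropping every line that carries a marker removes the wrapping HTML
    # comment and the placeholder, leaving only the download bullet visible.
    kept = [line for line in body.splitlines()
            if not any(marker in line for marker in to_remove)]
    assert kept == ["* [Linux x86_64](https://example.invalid/LLVM-X.Y.Z-Linux-X64.tar.xz)"]

The per-package lines that release uploaders uncomment by hand (Windows, Armv7-a) are left untouched because they do not contain any of the three markers.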
diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py
index ca46e1e..c4ee052 100755
--- a/llvm/utils/update_mir_test_checks.py
+++ b/llvm/utils/update_mir_test_checks.py
@@ -36,7 +36,7 @@ VREG_RE = re.compile(r"(%[0-9]+)(?:\.[a-z0-9_]+)?(?::[a-z0-9_]+)?(?:\([<>a-z0-9
MI_FLAGS_STR = (
r"(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn "
r"|reassoc |nuw |nsw |exact |nofpexcept |nomerge |unpredictable "
- r"|noconvergent |nneg |disjoint |nusw |samesign )*"
+ r"|noconvergent |nneg |disjoint |nusw |samesign |inbounds )*"
)
VREG_DEF_FLAGS_STR = r"(?:dead |undef )*"
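A quick sanity check that the extended flag list now accepts inbounds; the MIR fragment is invented:

    import re

    # Pattern copied from MI_FLAGS_STR above.
    MI_FLAGS_RE = re.compile(
        r"(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn "
        r"|reassoc |nuw |nsw |exact |nofpexcept |nomerge |unpredictable "
        r"|noconvergent |nneg |disjoint |nusw |samesign |inbounds )*"
    )

    m = MI_FLAGS_RE.match("nuw inbounds G_PTR_ADD %2, %3")
    assert m.group(0) == "nuw inbounds "  # both flags consumed, leaving the opcode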